From d9dc079d819b941a8af03d164eb2dd43a1f96290 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 8 May 2023 16:45:05 +0000
Subject: [PATCH] Bump github.com/tektoncd/pipeline from 0.45.0 to 0.47.0

Bumps [github.com/tektoncd/pipeline](https://github.com/tektoncd/pipeline) from 0.45.0 to 0.47.0.
- [Release notes](https://github.com/tektoncd/pipeline/releases)
- [Changelog](https://github.com/tektoncd/pipeline/blob/main/releases.md)
- [Commits](https://github.com/tektoncd/pipeline/compare/v0.45.0...v0.47.0)

---
updated-dependencies:
- dependency-name: github.com/tektoncd/pipeline
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 64 +- go.sum | 156 +- .../go/compute/internal/version.go | 2 +- .../github.com/aws/aws-sdk-go-v2/CHANGELOG.md | 2795 ++++++++++++++++- .../aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md | 4 +- .../aws/aws-sdk-go-v2/CONTRIBUTING.md | 35 +- vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md | 2 +- vendor/github.com/aws/aws-sdk-go-v2/Makefile | 18 +- .../github.com/aws/aws-sdk-go-v2/NOTICE.txt | 2 +- vendor/github.com/aws/aws-sdk-go-v2/README.md | 19 +- .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/aws/protocol/query/array.go | 11 + .../aws/protocol/xml/error_utils.go | 16 +- .../aws/ratelimit/token_rate_limit.go | 4 - .../aws/aws-sdk-go-v2/aws/retry/middleware.go | 5 +- .../aws/retry/retryable_error.go | 5 + .../aws/signer/internal/v4/headers.go | 1 + .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 52 + .../config/go_module_metadata.go | 2 +- .../config/resolve_credentials.go | 24 +- .../aws-sdk-go-v2/credentials/CHANGELOG.md | 48 + .../credentials/ec2rolecreds/doc.go | 2 +- .../credentials/go_module_metadata.go | 2 +- .../credentials/processcreds/provider.go | 24 +- .../feature/ec2/imds/CHANGELOG.md | 24 + .../feature/ec2/imds/api_client.go | 10 + .../feature/ec2/imds/go_module_metadata.go | 2 +- .../feature/ec2/imds/token_provider.go | 82 +- .../internal/configsources/CHANGELOG.md | 20 + .../configsources/go_module_metadata.go | 2 +- .../internal/endpoints/v2/CHANGELOG.md | 20 + .../endpoints/v2/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 20 + .../internal/ini/go_module_metadata.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 20 + .../presigned-url/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 33 + .../aws-sdk-go-v2/service/sso/api_client.go | 2 +- .../service/sso/api_op_GetRoleCredentials.go | 7 +- .../service/sso/api_op_ListAccountRoles.go | 3 +- .../service/sso/api_op_ListAccounts.go | 6 +- .../service/sso/api_op_Logout.go | 8 +- .../service/sso/deserializers.go | 48 +- .../aws/aws-sdk-go-v2/service/sso/doc.go | 9 +- .../service/sso/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/sso/types/errors.go | 8 +- .../aws-sdk-go-v2/service/sso/types/types.go | 9 +- .../service/ssooidc/CHANGELOG.md | 33 + .../service/ssooidc/api_client.go | 2 +- .../service/ssooidc/api_op_CreateToken.go | 19 +- .../service/ssooidc/api_op_RegisterClient.go | 8 +- .../api_op_StartDeviceAuthorization.go | 15 +- .../service/ssooidc/deserializers.go | 36 +- .../aws/aws-sdk-go-v2/service/ssooidc/doc.go | 54 +- .../service/ssooidc/go_module_metadata.go | 2 +- .../service/ssooidc/types/errors.go | 34 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 37 + .../aws-sdk-go-v2/service/sts/api_client.go | 2 +- .../service/sts/api_op_AssumeRole.go | 162 +-
.../service/sts/api_op_AssumeRoleWithSAML.go | 135 +- .../sts/api_op_AssumeRoleWithWebIdentity.go | 165 +- .../sts/api_op_DecodeAuthorizationMessage.go | 32 +- .../service/sts/api_op_GetAccessKeyInfo.go | 13 +- .../service/sts/api_op_GetCallerIdentity.go | 12 +- .../service/sts/api_op_GetFederationToken.go | 142 +- .../service/sts/api_op_GetSessionToken.go | 72 +- .../aws/aws-sdk-go-v2/service/sts/doc.go | 4 +- .../service/sts/go_module_metadata.go | 2 +- .../sts/internal/endpoints/endpoints.go | 3 + .../aws-sdk-go-v2/service/sts/serializers.go | 9 - .../aws-sdk-go-v2/service/sts/types/errors.go | 35 +- .../aws-sdk-go-v2/service/sts/types/types.go | 32 +- .../stargz-snapshotter/estargz/build.go | 48 +- .../stargz-snapshotter/estargz/estargz.go | 224 +- .../stargz-snapshotter/estargz/gzip.go | 2 +- .../stargz-snapshotter/estargz/testutil.go | 629 +++- .../stargz-snapshotter/estargz/types.go | 31 +- vendor/github.com/docker/cli/AUTHORS | 101 +- .../docker/cli/cli/config/config.go | 20 +- .../docker/cli/cli/config/configfile/file.go | 87 +- .../cli/cli/config/configfile/file_unix.go | 2 +- .../cli/cli/config/credentials/file_store.go | 5 +- vendor/github.com/docker/docker/AUTHORS | 283 +- .../docker/pkg/homedir/homedir_linux.go | 9 + .../docker/pkg/homedir/homedir_others.go | 5 + .../go-jose/go-jose/v3/jwt/builder.go | 334 ++ .../go-jose/go-jose/v3/jwt/claims.go | 130 + .../github.com/go-jose/go-jose/v3/jwt/doc.go | 22 + .../go-jose/go-jose/v3/jwt/errors.go | 53 + .../github.com/go-jose/go-jose/v3/jwt/jwt.go | 133 + .../go-jose/go-jose/v3/jwt/validation.go | 120 + .../go-containerregistry/pkg/v1/config.go | 15 + .../pkg/v1/empty/index.go | 1 + .../go-containerregistry/pkg/v1/platform.go | 41 + .../pkg/v1/tarball/write.go | 32 +- .../pkg/v1/zz_deepcopy_generated.go | 15 + .../klauspost/compress/.goreleaser.yml | 2 +- .../github.com/klauspost/compress/README.md | 30 +- .../klauspost/compress/fse/compress.go | 31 +- .../klauspost/compress/huff0/bitreader.go | 8 +- .../klauspost/compress/huff0/compress.go | 114 +- .../klauspost/compress/huff0/decompress.go | 2 +- .../compress/huff0/decompress_amd64.s | 584 ++-- .../compress/internal/snapref/encode_other.go | 22 + .../klauspost/compress/zstd/blockdec.go | 16 +- .../klauspost/compress/zstd/decodeheader.go | 9 +- .../klauspost/compress/zstd/decoder.go | 95 +- .../compress/zstd/decoder_options.go | 26 +- .../klauspost/compress/zstd/dict.go | 51 +- .../klauspost/compress/zstd/enc_base.go | 28 +- .../klauspost/compress/zstd/enc_best.go | 63 +- .../klauspost/compress/zstd/enc_better.go | 12 +- .../klauspost/compress/zstd/enc_dfast.go | 16 +- .../klauspost/compress/zstd/enc_fast.go | 14 +- .../klauspost/compress/zstd/encoder.go | 35 + .../compress/zstd/encoder_options.go | 36 +- .../klauspost/compress/zstd/framedec.go | 58 +- .../compress/zstd/internal/xxhash/README.md | 49 +- .../compress/zstd/internal/xxhash/xxhash.go | 47 +- .../zstd/internal/xxhash/xxhash_amd64.s | 336 +- .../zstd/internal/xxhash/xxhash_arm64.s | 140 +- .../zstd/internal/xxhash/xxhash_asm.go | 2 +- .../zstd/internal/xxhash/xxhash_other.go | 19 +- .../klauspost/compress/zstd/seqdec_amd64.s | 28 +- .../klauspost/compress/zstd/zstd.go | 31 +- .../cjson/canonicaljson.go | 46 +- .../go-securesystemslib/dsse/sign.go | 11 +- .../go-securesystemslib/dsse/verify.go | 13 +- .../sigstore/sigstore/pkg/oauthflow/flow.go | 2 +- .../sigstore/pkg/signature/dsse/adapters.go | 12 +- .../sigstore/pkg/signature/dsse/dsse.go | 4 +- .../sigstore/pkg/signature/dsse/multidsse.go | 5 +- 
.../sigstore/sigstore/pkg/tuf/client.go | 2 +- .../go-spiffe/v2/bundle/jwtbundle/bundle.go | 2 +- .../v2/bundle/spiffebundle/bundle.go | 2 +- .../v2/proto/spiffe/workload/workload.pb.go | 169 +- .../v2/proto/spiffe/workload/workload.proto | 12 + .../spiffe/go-spiffe/v2/spiffeid/path.go | 12 +- .../spiffe/go-spiffe/v2/svid/jwtsvid/svid.go | 7 +- .../spiffe/go-spiffe/v2/svid/x509svid/svid.go | 4 + .../spiffe/go-spiffe/v2/workloadapi/client.go | 23 +- .../pkg/apis/config/artifact_bucket.go | 112 - .../pipeline/pkg/apis/config/artifact_pvc.go | 86 - .../pipeline/pkg/apis/config/default.go | 10 + .../pipeline/pkg/apis/config/feature_flags.go | 152 +- .../pipeline/pkg/apis/config/store.go | 61 +- .../pkg/apis/config/trusted_resources.go | 72 - .../pkg/apis/config/zz_generated.deepcopy.go | 56 - .../pipeline/pkg/apis/pipeline/controller.go | 2 - .../pipeline/pkg/apis/pipeline/images.go | 9 - .../pipeline/pkg/apis/pipeline/register.go | 9 +- .../pkg/apis/pipeline/v1/matrix_types.go | 362 +++ .../pipeline/pkg/apis/pipeline/v1/merge.go | 60 + .../pkg/apis/pipeline/v1/openapi_generated.go | 276 +- .../pkg/apis/pipeline/v1/param_types.go | 309 +- .../pkg/apis/pipeline/v1/pipeline_defaults.go | 31 +- .../pkg/apis/pipeline/v1/pipeline_types.go | 314 +- .../apis/pipeline/v1/pipeline_validation.go | 334 +- .../pipeline/v1/pipelineref_validation.go | 2 +- .../apis/pipeline/v1/pipelinerun_defaults.go | 9 +- .../pkg/apis/pipeline/v1/pipelinerun_types.go | 12 +- .../pipeline/v1/pipelinerun_validation.go | 2 +- .../pkg/apis/pipeline/v1/provenance.go | 29 +- .../pkg/apis/pipeline/v1/resolver_types.go | 2 +- .../pkg/apis/pipeline/v1/result_types.go | 2 +- .../pkg/apis/pipeline/v1/result_validation.go | 4 +- .../pkg/apis/pipeline/v1/resultref.go | 8 +- .../pkg/apis/pipeline/v1/swagger.json | 156 +- .../pkg/apis/pipeline/v1/task_types.go | 7 +- .../pkg/apis/pipeline/v1/task_validation.go | 44 +- .../pkg/apis/pipeline/v1/taskref_types.go | 12 +- .../apis/pipeline/v1/taskref_validation.go | 2 +- .../pkg/apis/pipeline/v1/taskrun_defaults.go | 9 +- .../pkg/apis/pipeline/v1/taskrun_types.go | 9 +- .../apis/pipeline/v1/taskrun_validation.go | 8 +- .../pkg/apis/pipeline/v1/workspace_types.go | 1 + .../apis/pipeline/v1/zz_generated.deepcopy.go | 202 +- .../pkg/apis/pipeline/v1alpha1/run_types.go | 2 +- .../v1alpha1/verificationpolicy_types.go | 14 + .../v1alpha1/verificationpolicy_validation.go | 3 + .../v1alpha1/zz_generated.deepcopy.go | 2 +- .../pipeline/v1beta1/cluster_task_types.go | 4 +- .../pipeline/v1beta1/container_conversion.go | 16 + .../apis/pipeline/v1beta1/container_types.go | 93 +- .../apis/pipeline/v1beta1/customrun_types.go | 4 +- .../pkg/apis/pipeline/v1beta1/matrix_types.go | 362 +++ .../pipeline/v1beta1/openapi_generated.go | 698 ++-- .../apis/pipeline/v1beta1/param_conversion.go | 16 + .../pkg/apis/pipeline/v1beta1/param_types.go | 311 +- .../pipeline/v1beta1/pipeline_conversion.go | 50 +- .../pipeline/v1beta1/pipeline_defaults.go | 31 +- .../pipeline/v1beta1/pipeline_interface.go | 2 +- .../apis/pipeline/v1beta1/pipeline_types.go | 404 +-- .../pipeline/v1beta1/pipeline_validation.go | 467 ++- .../v1beta1/pipelineref_conversion.go | 18 +- .../pipeline/v1beta1/pipelineref_types.go | 1 + .../v1beta1/pipelineref_validation.go | 2 +- .../v1beta1/pipelinerun_conversion.go | 27 - .../pipeline/v1beta1/pipelinerun_defaults.go | 4 + .../pipeline/v1beta1/pipelinerun_types.go | 64 +- .../v1beta1/pipelinerun_validation.go | 5 +- .../pkg/apis/pipeline/v1beta1/provenance.go | 46 +- 
.../pipeline/v1beta1/provenance_conversion.go | 20 +- .../pipeline/v1beta1/resolver_conversion.go | 16 + .../apis/pipeline/v1beta1/resolver_types.go | 2 +- .../apis/pipeline/v1beta1/resource_paths.go | 40 - .../apis/pipeline/v1beta1/resource_types.go | 250 +- .../v1beta1/resource_types_validation.go | 101 - .../pipeline/v1beta1/result_conversion.go | 16 + .../pkg/apis/pipeline/v1beta1/result_types.go | 2 +- .../pipeline/v1beta1/result_validation.go | 4 +- .../pkg/apis/pipeline/v1beta1/resultref.go | 8 +- .../pkg/apis/pipeline/v1beta1/swagger.json | 445 ++- .../apis/pipeline/v1beta1/task_conversion.go | 31 +- .../pkg/apis/pipeline/v1beta1/task_types.go | 20 +- .../apis/pipeline/v1beta1/task_validation.go | 68 +- .../pipeline/v1beta1/taskref_conversion.go | 18 +- .../apis/pipeline/v1beta1/taskref_types.go | 14 +- .../pipeline/v1beta1/taskref_validation.go | 2 +- .../pipeline/v1beta1/taskrun_conversion.go | 50 - .../apis/pipeline/v1beta1/taskrun_defaults.go | 9 +- .../apis/pipeline/v1beta1/taskrun_types.go | 31 +- .../pipeline/v1beta1/taskrun_validation.go | 12 +- .../pipeline/v1beta1/workspace_conversion.go | 16 + .../apis/pipeline/v1beta1/workspace_types.go | 1 + .../pipeline/v1beta1/zz_generated.deepcopy.go | 239 +- .../pkg/apis/resource/v1alpha1/doc.go | 4 - .../v1alpha1/pipeline_resource_defaults.go | 34 - .../v1alpha1/pipeline_resource_types.go | 52 +- .../v1alpha1/pipelineresource_validation.go | 81 - .../pkg/apis/resource/v1alpha1/register.go | 3 - .../pipeline/pkg/apis/version/conversion.go | 4 +- .../clientset/versioned/scheme/register.go | 14 +- .../tektoncd/pipeline/pkg/result/result.go | 92 + .../pipeline/pkg/substitution/substitution.go | 2 +- .../tektoncd/pipeline/test/parse/yaml.go | 24 - vendor/go.opentelemetry.io/otel/.lycheeignore | 2 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 66 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 2 +- vendor/go.opentelemetry.io/otel/Makefile | 5 +- vendor/go.opentelemetry.io/otel/README.md | 5 + .../otel/attribute/value.go | 16 +- .../otel/internal/attribute/attribute.go | 82 +- .../go.opentelemetry.io/otel/trace/config.go | 17 + vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 7 +- vendor/golang.org/x/exp/maps/maps.go | 94 + vendor/golang.org/x/exp/slices/slices.go | 52 +- vendor/golang.org/x/exp/slices/sort.go | 51 +- vendor/golang.org/x/oauth2/README.md | 12 +- vendor/golang.org/x/oauth2/google/default.go | 16 +- vendor/golang.org/x/oauth2/google/doc.go | 61 +- vendor/golang.org/x/oauth2/google/google.go | 6 +- vendor/golang.org/x/oauth2/oauth2.go | 33 +- vendor/golang.org/x/oauth2/token.go | 14 +- .../x/tools/go/types/objectpath/objectpath.go | 762 +++++ .../x/tools/internal/gcimporter/gcimporter.go | 12 + .../tools/internal/gcimporter/ureader_yes.go | 41 +- .../x/tools/internal/typeparams/common.go | 1 - .../x/tools/internal/typesinternal/types.go | 9 + .../google.golang.org/api/idtoken/idtoken.go | 7 +- .../cert/default_cert.go | 0 .../cert/enterprise_cert.go | 0 .../cert/secureconnect_cert.go | 0 .../google.golang.org/api/internal/creds.go | 76 +- .../internal/dca => internal}/dca.go | 13 +- .../google.golang.org/api/internal/version.go | 2 +- .../api/transport/http/dial.go | 12 +- .../googleapis/api/httpbody/httpbody.pb.go | 4 +- vendor/k8s.io/klog/v2/contextual.go | 30 +- .../k8s.io/klog/v2/internal/buffer/buffer.go | 41 +- .../klog/v2/internal/serialize/keyvalues.go | 46 +- vendor/k8s.io/klog/v2/klog.go | 97 +- .../k8s.io/kube-openapi/pkg/cached/cache.go | 264 ++ 
.../kube-openapi/pkg/handler3/handler.go | 235 +- .../k8s.io/kube-openapi/pkg/internal/flags.go | 4 + .../pkg/internal/handler/handler_cache.go | 57 - .../pkg/internal/serialization.go | 65 + .../go-json-experiment/json/arshal.go | 7 + .../go-json-experiment/json/arshal_any.go | 31 +- .../go-json-experiment/json/arshal_default.go | 147 +- .../go-json-experiment/json/arshal_inlined.go | 57 +- .../go-json-experiment/json/arshal_methods.go | 4 +- .../go-json-experiment/json/arshal_time.go | 99 +- .../go-json-experiment/json/decode.go | 12 +- .../go-json-experiment/json/encode.go | 24 + .../go-json-experiment/json/pools.go | 32 + .../go-json-experiment/json/state.go | 4 +- .../go-json-experiment/json/token.go | 10 +- .../go-json-experiment/json/value.go | 56 +- .../k8s.io/kube-openapi/pkg/spec3/encoding.go | 20 + .../k8s.io/kube-openapi/pkg/spec3/example.go | 23 + .../pkg/spec3/external_documentation.go | 19 + vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go | 254 ++ .../k8s.io/kube-openapi/pkg/spec3/header.go | 21 + .../kube-openapi/pkg/spec3/media_type.go | 20 + .../kube-openapi/pkg/spec3/operation.go | 18 + .../kube-openapi/pkg/spec3/parameter.go | 22 + vendor/k8s.io/kube-openapi/pkg/spec3/path.go | 80 + .../kube-openapi/pkg/spec3/request_body.go | 21 + .../k8s.io/kube-openapi/pkg/spec3/response.go | 118 +- .../k8s.io/kube-openapi/pkg/spec3/server.go | 38 + vendor/k8s.io/kube-openapi/pkg/spec3/spec.go | 13 + .../pkg/validation/spec/header.go | 23 +- .../kube-openapi/pkg/validation/spec/info.go | 33 +- .../kube-openapi/pkg/validation/spec/items.go | 53 +- .../pkg/validation/spec/operation.go | 36 +- .../pkg/validation/spec/parameter.go | 36 +- .../pkg/validation/spec/path_item.go | 28 +- .../kube-openapi/pkg/validation/spec/paths.go | 20 +- .../kube-openapi/pkg/validation/spec/ref.go | 18 +- .../pkg/validation/spec/response.go | 36 +- .../pkg/validation/spec/responses.go | 24 +- .../pkg/validation/spec/schema.go | 79 +- .../pkg/validation/spec/security_scheme.go | 20 +- .../pkg/validation/spec/swagger.go | 82 +- .../kube-openapi/pkg/validation/spec/tag.go | 19 +- vendor/modules.txt | 79 +- 318 files changed, 14561 insertions(+), 5910 deletions(-) create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwt/builder.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwt/claims.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwt/doc.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwt/errors.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwt/jwt.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwt/validation.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_bucket.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_pvc.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/trusted_resources.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_paths.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipeline_resource_defaults.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipelineresource_validation.go create mode 100644 
vendor/github.com/tektoncd/pipeline/pkg/result/result.go create mode 100644 vendor/golang.org/x/exp/maps/maps.go create mode 100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go rename vendor/google.golang.org/api/{transport => internal}/cert/default_cert.go (100%) rename vendor/google.golang.org/api/{transport => internal}/cert/enterprise_cert.go (100%) rename vendor/google.golang.org/api/{transport => internal}/cert/secureconnect_cert.go (100%) rename vendor/google.golang.org/api/{transport/internal/dca => internal}/dca.go (92%) create mode 100644 vendor/k8s.io/kube-openapi/pkg/cached/cache.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/serialization.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go diff --git a/go.mod b/go.mod index ebf0102102..e67145f584 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/sigstore/cosign/v2 v2.0.0 github.com/spf13/viper v1.15.0 github.com/stretchr/testify v1.8.2 - github.com/tektoncd/pipeline v0.45.0 + github.com/tektoncd/pipeline v0.47.0 github.com/tektoncd/plumbing v0.0.0-20221102182345-5dbcfda657d7 github.com/tektoncd/triggers v0.23.1 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 @@ -36,7 +36,7 @@ require ( gotest.tools/v3 v3.4.0 k8s.io/api v0.26.1 k8s.io/apiextensions-apiserver v0.26.1 - k8s.io/apimachinery v0.26.1 + k8s.io/apimachinery v0.26.4 k8s.io/client-go v0.26.1 k8s.io/code-generator v0.26.1 knative.dev/pkg v0.0.0-20230224205330-75da922ef055 @@ -57,7 +57,7 @@ replace k8s.io/client-go => k8s.io/client-go v0.25.5 replace k8s.io/code-generator => k8s.io/code-generator v0.25.5 require ( - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go/compute v1.19.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect @@ -89,19 +89,19 @@ require ( github.com/alibabacloud-go/tea-xml v1.1.2 // indirect github.com/aliyun/credentials-go v1.2.3 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect - github.com/aws/aws-sdk-go-v2 v1.17.3 // indirect - github.com/aws/aws-sdk-go-v2/config v1.18.8 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.8 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 // indirect + github.com/aws/aws-sdk-go-v2 v1.17.8 // indirect + github.com/aws/aws-sdk-go-v2/config v1.18.21 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.20 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.26 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.33 // indirect github.com/aws/aws-sdk-go-v2/service/ecr v1.17.18 // indirect github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 // 
indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.26 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.12.8 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.8 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.18.9 // indirect github.com/aws/smithy-go v1.13.5 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221004211355-a250ad2ca1e3 // indirect github.com/benbjohnson/clock v1.1.0 // indirect @@ -115,16 +115,16 @@ require ( github.com/clbanning/mxj/v2 v2.5.6 // indirect github.com/cockroachdb/apd/v2 v2.0.1 // indirect github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect - github.com/containerd/stargz-snapshotter/estargz v0.12.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect github.com/coreos/go-oidc/v3 v3.5.0 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20210823021906-dc406ceaf94b // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/digitorus/pkcs7 v0.0.0-20221212123742-001c36b64ec3 // indirect github.com/digitorus/timestamp v0.0.0-20221019182153-ef3b63b79b31 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/cli v20.10.21+incompatible // indirect + github.com/docker/cli v23.0.1+incompatible // indirect github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v20.10.24+incompatible // indirect + github.com/docker/docker v23.0.1+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/emicklei/go-restful/v3 v3.10.1 // indirect github.com/emicklei/proto v1.6.15 // indirect @@ -163,7 +163,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/certificate-transparency-go v1.1.4 // indirect github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-containerregistry v0.13.1-0.20230203223142-b3c23b4c3f28 // indirect + github.com/google/go-containerregistry v0.14.0 // indirect github.com/google/go-github/v50 v50.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect @@ -182,7 +182,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect - github.com/klauspost/compress v1.15.11 // indirect + github.com/klauspost/compress v1.16.0 // indirect github.com/leodido/go-urn v1.2.1 // indirect github.com/lib/pq v1.10.6 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -214,12 +214,12 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/sassoftware/relic v0.0.0-20210427151427-dfb082b79b74 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.5.0 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/sigstore/fulcio v1.1.0 // indirect github.com/sigstore/rekor v1.0.1 // indirect - github.com/sigstore/sigstore v1.5.1 // indirect + github.com/sigstore/sigstore v1.6.2 // indirect github.com/sigstore/timestamp-authority v0.2.1 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect @@ -228,7 +228,7 @@ require ( github.com/spf13/cobra v1.6.1 // indirect github.com/spf13/jwalterweatherman 
v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spiffe/go-spiffe/v2 v2.1.2 // indirect + github.com/spiffe/go-spiffe/v2 v2.1.4 // indirect github.com/subosito/gotenv v1.4.2 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect @@ -244,31 +244,31 @@ require ( github.com/yashtewari/glob-intersection v0.1.0 // indirect github.com/zeebo/errs v1.3.0 // indirect go.mongodb.org/mongo-driver v1.10.1 // indirect - go.opentelemetry.io/otel v1.13.0 // indirect - go.opentelemetry.io/otel/trace v1.13.0 // indirect + go.opentelemetry.io/otel v1.14.0 // indirect + go.opentelemetry.io/otel/trace v1.14.0 // indirect go.step.sm/crypto v0.25.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/automaxprocs v1.5.1 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/exp v0.0.0-20220823124025-807a23277127 // indirect + golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect golang.org/x/net v0.9.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.7.0 // indirect golang.org/x/term v0.7.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.6.0 // indirect - google.golang.org/api v0.110.0 // indirect + golang.org/x/tools v0.7.0 // indirect + google.golang.org/api v0.116.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 // indirect - k8s.io/klog/v2 v2.90.0 // indirect - k8s.io/kube-openapi v0.0.0-20230123231816-1cb3ae25d79a // indirect - k8s.io/utils v0.0.0-20230115233650-391b47cb4029 // indirect + k8s.io/klog/v2 v2.90.1 // indirect + k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect + k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/release-utils v0.7.3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect diff --git a/go.sum b/go.sum index 0d20997797..ddbb5707c9 100644 --- a/go.sum +++ b/go.sum @@ -35,16 +35,16 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= 
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= -cloud.google.com/go/kms v1.9.0 h1:b0votJQa/9DSsxgHwN33/tTLA7ZHVzfWhDCrfiXijSo= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/kms v1.10.1 h1:7hm1bRqGCA1GBRQUrp831TwJ9TWhP+tvLuP497CQS2g= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -189,54 +189,52 @@ github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdK github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.44.195 h1:d5xFL0N83Fpsq2LFiHgtBUHknCRUPGHdOlCWt/jtOJs= +github.com/aws/aws-sdk-go v1.44.239 h1:AenB6byCYGSBb30q99CGYqFbqpLpWrTidzm7MzxtuPo= github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= -github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY= -github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2 v1.17.8 h1:GMupCNNI7FARX27L7GjCJM8NgivWbRgpjNI/hOQjFS8= +github.com/aws/aws-sdk-go-v2 v1.17.8/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2/config v1.17.8/go.mod h1:UkCI3kb0sCdvtjiXYiU4Zx5h07BOpgBTtkPu/49r+kA= -github.com/aws/aws-sdk-go-v2/config v1.18.8 h1:lDpy0WM8AHsywOnVrOHaSMfpaiV2igOw8D7svkFkXVA= -github.com/aws/aws-sdk-go-v2/config v1.18.8/go.mod h1:5XCmmyutmzzgkpk/6NYTjeWb6lgo9N170m1j6pQkIBs= +github.com/aws/aws-sdk-go-v2/config v1.18.21 h1:ENTXWKwE8b9YXgQCsruGLhvA9bhg+RqAsL9XEMEsa2c= +github.com/aws/aws-sdk-go-v2/config v1.18.21/go.mod h1:+jPQiVPz1diRnjj6VGqWcLK6EzNmQ42l7J3OqGTLsSY= github.com/aws/aws-sdk-go-v2/credentials v1.12.21/go.mod h1:O+4XyAt4e+oBAoIwNUYkRg3CVMscaIJdmZBOcPgJ8D8= -github.com/aws/aws-sdk-go-v2/credentials v1.13.8 h1:vTrwTvv5qAwjWIGhZDSBH/oQHuIQjGmD232k01FUh6A= 
-github.com/aws/aws-sdk-go-v2/credentials v1.13.8/go.mod h1:lVa4OHbvgjVot4gmh1uouF1ubgexSCN92P6CJQpT0t8= +github.com/aws/aws-sdk-go-v2/credentials v1.13.20 h1:oZCEFcrMppP/CNiS8myzv9JgOzq2s0d3v3MXYil/mxQ= +github.com/aws/aws-sdk-go-v2/credentials v1.13.20/go.mod h1:xtZnXErtbZ8YGXC3+8WfajpMBn5Ga/3ojZdxHq6iI8o= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 h1:j9wi1kQ8b+e0FBVHxCqCGo4kxDU175hoDHcWAi0sauU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21/go.mod h1:ugwW57Z5Z48bpvUyZuaPy4Kv+vEfJWnIrky7RmkBvJg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.2 h1:jOzQAesnBFDmz93feqKnsTHsXrlwWORNZMFHMV+WLFU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.2/go.mod h1:cDh1p6XkSGSwSRIArWRc6+UqAQ7x4alQ0QfpVR6f+co= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 h1:I3cakv2Uy1vNmmhRQmFptYDxOvBnwCdNwyw63N0RaRU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.32 h1:dpbVNUjczQ8Ae3QKHbpHBpfvaVkRdesxpTOe9pTouhU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.32/go.mod h1:RudqOgadTWdcS3t/erPQo24pcVEoYyqj/kKW5Vya21I= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 h1:5NbbMrIzmUn/TXFqAle6mgrH5m9cOvMLRGL7pnG8tRE= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.26 h1:QH2kOS3Ht7x+u0gHCh06CXL/h6G8LQJFpZfFBYBNboo= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.26/go.mod h1:vq86l7956VgFr0/FWQ2BWnK07QC3WYsepKzy33qqY5U= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 h1:KeTxcGdNnQudb46oOl4d90f2I33DF/c6q3RnZAmvQdQ= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28/go.mod h1:yRZVr/iT0AqyHeep00SZ4YfBAKojXz08w3XMBscdi0c= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.33 h1:HbH1VjUgrCdLJ+4lnnuLI4iVNRvBbBELGaJ5f69ClA8= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.33/go.mod h1:zG2FcwjQarWaqXSCGpgcr3RSjZ6dHGguZSppUL0XR7Q= github.com/aws/aws-sdk-go-v2/service/ecr v1.17.18 h1:uiF/RI+Up8H2xdgT2GWa20YzxiKEalHieqNjm6HC3Xk= github.com/aws/aws-sdk-go-v2/service/ecr v1.17.18/go.mod h1:DQtDYmexqR+z+B6HBCvY7zK/tuXKv6Zy/IwOXOK3eow= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.17 h1:bcQy5/dcJO8VQD+p0tDoIYdgEC3ch9f1/BNRES7XMug= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.17/go.mod h1:r1Vuka0kyzqN0sZm4lYTXf0Vhl+o/mTLq6vKpBBZYaQ= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 h1:5C6XgTViSb0bunmU57b3CT+MhxULqHH2721FVA+/kDM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21/go.mod h1:lRToEJsn+DRA9lW4O9L9+/3hjTkUzlzyzHqn8MTds5k= -github.com/aws/aws-sdk-go-v2/service/kms v1.20.0 h1:1mEQ1BVRfxU2KzcUUIzqDQ8p6yPkhzHrHT++sjtLJts= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.26 h1:uUt4XctZLhl9wBE1L8lobU3bVN8SNUP7T+olb0bWBO4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url 
v1.9.26/go.mod h1:Bd4C/4PkVGubtNe5iMXu5BNnaBi/9t/UsFspPt4ram8= +github.com/aws/aws-sdk-go-v2/service/kms v1.20.10 h1:rmw2sdnYS5kP96hKmcm8Yr+ttZLC/zHER8nuQ9vbomc= github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 h1:/2gzjhQowRLarkkBOGPXSRnb8sQ2RVsjdG1C/UliK/c= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.0/go.mod h1:wo/B7uUm/7zw/dWhBJ4FXuw1sySU5lyIhVg1Bu2yL9A= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.8 h1:5cb3D6xb006bPTqEfCNaEA6PPEfBXxxy4NNeX/44kGk= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.8/go.mod h1:GNIveDnP+aE3jujyUSH5aZ/rktsTM5EvtKnCqBZawdw= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 h1:Jfly6mRxk2ZOSlbCvZfKNS7TukSx1mIzhSsqZ/IGSZI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0/go.mod h1:TZSH7xLO7+phDtViY/KUp9WGCJMQkLJ/VpgkTFd5gh8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.8 h1:NZaj0ngZMzsubWZbrEFSB4rgSQRbFq38Sd6KBxHuOIU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.8/go.mod h1:44qFP1g7pfd+U+sQHLPalAPKnyfTZjJsYR4xIwsJy5o= github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 h1:kOO++CYo50RcTFISESluhWEi5Prhg+gaSs4whWabiZU= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.0/go.mod h1:+lGbb3+1ugwKrNTWcf2RT05Xmp543B06zDFTwiTLp7I= +github.com/aws/aws-sdk-go-v2/service/sts v1.18.9 h1:Qf1aWwnsNkyAoqDqmdM3nHwN78XQjec27LjM6b9vyfI= +github.com/aws/aws-sdk-go-v2/service/sts v1.18.9/go.mod h1:yyW88BEPXA2fGFyI2KCcZC3dNpiT0CZAHaF+i656/tQ= github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= @@ -297,7 +295,7 @@ github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ github.com/clbanning/mxj/v2 v2.5.6 h1:Jm4VaCI/+Ug5Q57IzEoZbwx4iQFA6wkXv72juUSeK+g= github.com/clbanning/mxj/v2 v2.5.6/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudevents/sdk-go/v2 v2.13.0 h1:2zxDS8RyY1/wVPULGGbdgniGXSzLaRJVl136fLXGsYw= +github.com/cloudevents/sdk-go/v2 v2.14.0 h1:Nrob4FwVgi5L4tV9lhjzZcjYqFVyJzsA56CwPaPfv6s= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -376,8 +374,8 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/stargz-snapshotter/estargz v0.7.0/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw= -github.com/containerd/stargz-snapshotter/estargz v0.12.1 h1:+7nYmHJb0tEkcRaAW+MHqoKaJYZmkikupxCqVtmPuY0= -github.com/containerd/stargz-snapshotter/estargz v0.12.1/go.mod h1:12VUuCq3qPq4y8yUW+l5w3+oXV3cx2Po3KSe/SmPGqw= +github.com/containerd/stargz-snapshotter/estargz v0.14.3 
h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= +github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -457,16 +455,16 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.21+incompatible h1:qVkgyYUnOLQ98LtXBrwd/duVqPT2X4SHndOuGsfwyhU= -github.com/docker/cli v20.10.21+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM= +github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= -github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= +github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= @@ -624,7 +622,7 @@ github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/j github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= -github.com/go-rod/rod v0.112.3 h1:xbSaA9trZ8v/+eJRGOM6exK1RCsLPwwnzA78vpES0gk= +github.com/go-rod/rod v0.112.8 h1:lYFnHv/lFyjW/Ye0IhyKLeHw/zfhHbSTqawoCi2z/nI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= @@ -749,8 +747,8 @@ 
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.6.0/go.mod h1:euCCtNbZ6tKqi1E72vwDj2xZcN5ttKpZLfa/wSo5iLw= -github.com/google/go-containerregistry v0.13.1-0.20230203223142-b3c23b4c3f28 h1:gFDKHwyCxpzgUozSOM8eLCx0V7muSr30QYU2QH+p48E= -github.com/google/go-containerregistry v0.13.1-0.20230203223142-b3c23b4c3f28/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo= +github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw= +github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk= github.com/google/go-github/v50 v50.0.0 h1:gdO1AeuSZZK4iYWwVbjni7zg8PIQhp7QfmPunr016Jk= github.com/google/go-github/v50 v50.0.0/go.mod h1:Ev4Tre8QoKiolvbpOSG3FIi4Mlon3S2Nt9W5JYqKiwA= github.com/google/go-licenses v0.0.0-20200602185517-f29a4c695c3d/go.mod h1:g1VOUGKZYIqe8lDq2mL7plhAWXqrEaGUs7eIjthN1sk= @@ -796,7 +794,7 @@ github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= @@ -834,18 +832,15 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.6 h1:MDV3UrKQBM3du3G7MApDGvOsMYy3JQJ4exhSoKBAeVA= github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= 
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -853,8 +848,6 @@ github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0S github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -866,9 +859,7 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/vault/api v1.8.2 h1:C7OL9YtOtwQbTKI9ogB0A1wffRbCN+rH/LLCHO3d8HM= -github.com/hashicorp/vault/sdk v0.6.1 h1:sjZC1z4j5Rh2GXYbkxn5BLK05S1p7+MhW4AgdUmgRUA= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/vault/api v1.9.0 h1:ab7dI6W8DuCY7yCU8blo0UCYl2oHre/dloCmzMWg9w8= github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc= github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= @@ -934,8 +925,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= -github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1004,12 +995,10 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= 
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -1019,7 +1008,6 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= @@ -1052,7 +1040,6 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -1134,7 +1121,6 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1226,8 +1212,8 @@ github.com/sassoftware/relic v0.0.0-20210427151427-dfb082b79b74/go.mod h1:YlB8wF github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= 
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= -github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= +github.com/secure-systems-lab/go-securesystemslib v0.5.0 h1:oTiNu0QnulMQgN/hLK124wJD/r2f9ZhIUuKIeBsCBT8= +github.com/secure-systems-lab/go-securesystemslib v0.5.0/go.mod h1:uoCqUC0Ap7jrBSEanxT+SdACYJTVplRXWLkGMuDjXqk= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -1240,8 +1226,8 @@ github.com/sigstore/fulcio v1.1.0 h1:mzzJ05Ccu8Y2inyioklNvc8MpzlGHxu8YqNeTm0dHfU github.com/sigstore/fulcio v1.1.0/go.mod h1:zv1ZQTXZbUwQdRwajlQksc34pRas+2aZYpIZoQBNev8= github.com/sigstore/rekor v1.0.1 h1:rcESXSNkAPRWFYZel9rarspdvneET60F2ngNkadi89c= github.com/sigstore/rekor v1.0.1/go.mod h1:ecTKdZWGWqE1pl3U1m1JebQJLU/hSjD9vYHOmHQ7w4g= -github.com/sigstore/sigstore v1.5.1 h1:iUou0QJW8eQKMUkTXbFyof9ZOblDtfaW2Sn2+QI8Tcs= -github.com/sigstore/sigstore v1.5.1/go.mod h1:3i6UTWVNtFwOtbgG63FZZNID4vO9KcO8AszIJlaNI8k= +github.com/sigstore/sigstore v1.6.2 h1:D03GxT3YK+ZkRmCS6SJIDCpfQ0Ypy1o6mgtXBQELtZc= +github.com/sigstore/sigstore v1.6.2/go.mod h1:eN2ynU1Lp1btxinlKXtpybsAHxm6VLQ+S9iPN0cH0wQ= github.com/sigstore/timestamp-authority v0.2.1 h1:uqyamGxSXMEDt+e4t5XhR8JdJMeNBEj9V2Pol9BhuqY= github.com/sigstore/timestamp-authority v0.2.1/go.mod h1:i6zDth6bXY/ZaMzT5Hjd3ZuKtX85TW8fNoNWn8TxAc0= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -1298,8 +1284,8 @@ github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5q github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= -github.com/spiffe/go-spiffe/v2 v2.1.2 h1:nfNwopOP7q0qsWU6AUASqmbtYViwHA6vuHyAtqFJtNc= -github.com/spiffe/go-spiffe/v2 v2.1.2/go.mod h1:cbQmFrxsOpbm5tWURAYip9ZK0dOSFeoFG3/5Ub9Hvy0= +github.com/spiffe/go-spiffe/v2 v2.1.4 h1:Z31Ycaf2Z5DF38sQGmp+iGKjBhBlSzfAq68bfy67Mxw= +github.com/spiffe/go-spiffe/v2 v2.1.4/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= @@ -1336,8 +1322,8 @@ github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/tektoncd/pipeline v0.45.0 h1:Hv9kyutu5GWGXKtcMrM7PXdAULgeQc0F2HWDNg+jo5c= -github.com/tektoncd/pipeline v0.45.0/go.mod h1:20Xs6qk3BTpsLHYWEtLNPM44XKqNH5jYwoomXHOGNs8= +github.com/tektoncd/pipeline v0.47.0 h1:zZxmp6im8/p9RaH32LgeCP6dwH/4hcsfvEQUrwGsUPA= +github.com/tektoncd/pipeline v0.47.0/go.mod 
h1:7H1DeNuEJFGoExGwQTlRul2IziCPxkjXRdDdirWmoQs= github.com/tektoncd/plumbing v0.0.0-20221102182345-5dbcfda657d7 h1:YsjQ83UBIIq4k/s2PzQ6pqe4tpPtm1hia3oyNBDDrDU= github.com/tektoncd/plumbing v0.0.0-20221102182345-5dbcfda657d7/go.mod h1:uJBaI0AL/kjPThiMYZcWRujEz7D401v643d6s/21GAg= github.com/tektoncd/triggers v0.23.1 h1:hdfjr5R+gC1RnHSrxanEbQGXXmKJ9QtTeV+uy4D+S8s= @@ -1450,18 +1436,18 @@ go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUz go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y= -go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= +go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= +go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.13.0 h1:BHib5g8MvdqS65yo2vV1s6Le42Hm6rrw08qU6yz5JaM= +go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY= -go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds= +go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= +go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.step.sm/crypto v0.25.0 h1:a+7sKyozZH9B30s0dHluygxreUxI1NtCBEmuNXx7a4k= go.step.sm/crypto v0.25.0/go.mod h1:kr1rzO6SzeQnLm6Zu6lNtksHZLiFe9k8LolSJNhoc94= @@ -1534,8 +1520,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220823124025-807a23277127 h1:S4NrSKDfihhl3+4jSTgwoIevKxX9p7Iv9x++OEIptDo= -golang.org/x/exp v0.0.0-20220823124025-807a23277127/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= +golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod 
h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1656,8 +1642,8 @@ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1895,8 +1881,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1936,8 +1922,8 @@ google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00 google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.110.0 h1:l+rh0KYUooe9JGbGVx71tbFo4SMbMTXK3I3ia2QSEeU= -google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.116.0 h1:09tOPVufPwfm5W4aA8EizGHJ7BcoRDsIareM2a15gO4= +google.golang.org/api v0.116.0/go.mod h1:9cD4/t6uvd9naoEJFA+M96d0IuB6BqFuyhpw68+mRGg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2012,8 +1998,8 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto 
v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633 h1:0BOZf6qNozI3pkN3fJLwNubheHJYHhMh91GRFOWWK08= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2165,21 +2151,21 @@ k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M= -k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200204173128-addea2498afe/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/kube-openapi v0.0.0-20230123231816-1cb3ae25d79a h1:s6zvHjyDQX1NtVT88pvw2tddqhqY0Bz0Gbnn+yctsFU= -k8s.io/kube-openapi v0.0.0-20230123231816-1cb3ae25d79a/go.mod h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0= +k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg= +k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230115233650-391b47cb4029 h1:L8zDtT4jrxj+TaQYD0k8KNlr556WaVQylDXswKmX+dE= -k8s.io/utils v0.0.0-20230115233650-391b47cb4029/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= +k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= knative.dev/eventing v0.35.5 h1:G9gthy4CpBUWH/NvTX3SvKHGGqHMPOlWY++JiV+oybg= knative.dev/networking v0.0.0-20220404212543-dde40b019aff h1:pqzWi29qb44TY+5xtc9vty4mSyUYvojXZGCp0y/91eo= knative.dev/pkg 
v0.0.0-20230125083639-408ad0773f47 h1:zlRO7wXOHVYgKvsC3nIaYGqeQGlLJL8EIUY30Rh37Is= diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index ddddbd21f2..ac02a3ce12 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.18.0" +const Version = "1.19.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md index 197c118e2a..37e84b537b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md @@ -1,3 +1,2781 @@ +# Release (2023-04-07) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.15.0](service/dlm/CHANGELOG.md#v1150-2023-04-07) + * **Announcement**: This release includes breaking changes for the timestamp trait on the data lifecycle management client. + * **Feature**: Updated timestamp format for GetLifecyclePolicy API + * **Bug Fix**: Correct timestamp type for data lifecycle manager. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.21.0](service/docdb/CHANGELOG.md#v1210-2023-04-07) + * **Feature**: This release adds a new parameter 'DBClusterParameterGroupName' to 'RestoreDBClusterFromSnapshot' API to associate the name of the DB cluster parameter group while performing restore. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.8](service/fsx/CHANGELOG.md#v1288-2023-04-07) + * **Documentation**: Amazon FSx for Lustre now supports creating data repository associations on Persistent_1 and Scratch_2 file systems. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.31.0](service/lambda/CHANGELOG.md#v1310-2023-04-07) + * **Feature**: This release adds a new Lambda InvokeWithResponseStream API to support streaming Lambda function responses. The release also adds a new InvokeMode parameter to Function Url APIs to control whether the response will be streamed or buffered. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.34.0](service/quicksight/CHANGELOG.md#v1340-2023-04-07) + * **Feature**: This release has two changes: adding the OR condition to tag-based RLS rules in CreateDataSet and UpdateDataSet; adding RefreshSchedule and Incremental RefreshProperties operations for users to programmatically configure SPICE dataset ingestions. +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.19.3](service/redshiftdata/CHANGELOG.md#v1193-2023-04-07) + * **Documentation**: Update documentation of API descriptions as needed in support of temporary credentials with IAM identity. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.18.1](service/servicecatalog/CHANGELOG.md#v1181-2023-04-07) + * **Documentation**: Updates description for property + +# Release (2023-04-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.27.0](service/cloudformation/CHANGELOG.md#v1270-2023-04-06) + * **Feature**: Including UPDATE_COMPLETE as a failed status for DeleteStack waiter. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.22.0](service/greengrassv2/CHANGELOG.md#v1220-2023-04-06) + * **Feature**: Add support for SUCCEEDED value in coreDeviceExecutionStatus field. Documentation updates for Greengrass V2. 
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.21.0](service/proton/CHANGELOG.md#v1210-2023-04-06) + * **Feature**: This release adds support for the AWS Proton service sync feature. Service sync enables managing an AWS Proton service (creating and updating instances) and all of it's corresponding service instances from a Git repository. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.42.1](service/rds/CHANGELOG.md#v1421-2023-04-06) + * **Documentation**: Adds and updates the SDK examples + +# Release (2023-04-05) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.31.0](service/configservice/CHANGELOG.md#v1310-2023-04-05) + * **Feature**: This release adds resourceType enums for types released in March 2023. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.24.3](service/ecs/CHANGELOG.md#v1243-2023-04-05) + * **Documentation**: This is a document only updated to add information about Amazon Elastic Inference (EI). +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.7](service/identitystore/CHANGELOG.md#v1167-2023-04-05) + * **Documentation**: Documentation updates for Identity Store CLI command reference. +* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.1.0](service/ivsrealtime/CHANGELOG.md#v110-2023-04-05) + * **Feature**: Fix ParticipantToken ExpirationTime format +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.26.0](service/networkfirewall/CHANGELOG.md#v1260-2023-04-05) + * **Feature**: AWS Network Firewall now supports IPv6-only subnets. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.18.0](service/servicecatalog/CHANGELOG.md#v1180-2023-04-05) + * **Feature**: removed incorrect product type value +* `github.com/aws/aws-sdk-go-v2/service/vpclattice`: [v1.0.1](service/vpclattice/CHANGELOG.md#v101-2023-04-05) + * **Documentation**: This release removes the entities in the API doc model package for auth policies. + +# Release (2023-04-04) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.10.0](service/amplifyuibuilder/CHANGELOG.md#v1100-2023-04-04) + * **Feature**: Support StorageField and custom displays for data-bound options in form builder. Support non-string operands for predicates in collections. Support choosing client to get token from. +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.28.1](service/autoscaling/CHANGELOG.md#v1281-2023-04-04) + * **Documentation**: Documentation updates for Amazon EC2 Auto Scaling +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.93.0](service/ec2/CHANGELOG.md#v1930-2023-04-04) + * **Feature**: C6in, M6in, M6idn, R6in and R6idn bare metal instances are powered by 3rd Generation Intel Xeon Scalable processors and offer up to 200 Gbps of network bandwidth. +* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.13.0](service/elasticinference/CHANGELOG.md#v1130-2023-04-04) + * **Feature**: Updated public documentation for the Describe and Tagging APIs. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.73.0](service/sagemaker/CHANGELOG.md#v1730-2023-04-04) + * **Feature**: Amazon SageMaker Asynchronous Inference now allows customer's to receive failure model responses in S3 and receive success/failure model responses in SNS notifications. 
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.19.0](service/sagemakerruntime/CHANGELOG.md#v1190-2023-04-04) + * **Feature**: Amazon SageMaker Asynchronous Inference now provides customers a FailureLocation as a response parameter in InvokeEndpointAsync API to capture the model failure responses. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.28.0](service/wafv2/CHANGELOG.md#v1280-2023-04-04) + * **Feature**: This release rolls back association config feature for webACLs that protect CloudFront protections. + +# Release (2023-04-03) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.45.0](service/glue/CHANGELOG.md#v1450-2023-04-03) + * **Feature**: Add support for database-level federation +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.21.0](service/lakeformation/CHANGELOG.md#v1210-2023-04-03) + * **Feature**: Add support for database-level federation +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.18.0](service/licensemanager/CHANGELOG.md#v1180-2023-04-03) + * **Feature**: This release adds grant override options to the CreateGrantVersion API. These options can be used to specify grant replacement behavior during grant activation. +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.15.0](service/mwaa/CHANGELOG.md#v1150-2023-04-03) + * **Feature**: This Amazon MWAA release adds the ability to customize the Apache Airflow environment by launching a shell script at startup. This shell script is hosted in your environment's Amazon S3 bucket. Amazon MWAA runs the script before installing requirements and initializing the Apache Airflow process. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.17.0](service/servicecatalog/CHANGELOG.md#v1170-2023-04-03) + * **Feature**: This release introduces Service Catalog support for Terraform open source. It enables 1. The notify* APIs to Service Catalog. These APIs are used by the terraform engine to notify the result of the provisioning engine execution. 2. Adds a new TERRAFORM_OPEN_SOURCE product type in CreateProduct API. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.27.0](service/wafv2/CHANGELOG.md#v1270-2023-04-03) + * **Feature**: For web ACLs that protect CloudFront protections, the default request body inspection size is now 16 KB, and you can use the new association configuration to increase the inspection size further, up to 64 KB. Sizes over 16 KB can incur additional costs. + +# Release (2023-03-31) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.92.1](service/ec2/CHANGELOG.md#v1921-2023-03-31) + * **Documentation**: Documentation updates for EC2 On Demand Capacity Reservations +* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.1.0](service/internetmonitor/CHANGELOG.md#v110-2023-03-31) + * **Feature**: This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to deliver internet measurements to Amazon S3 buckets as well as CloudWatch Logs. 
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.10.1](service/resiliencehub/CHANGELOG.md#v1101-2023-03-31) + * **Documentation**: Adding EKS related documentation for appTemplateBody +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.31.1](service/s3/CHANGELOG.md#v1311-2023-03-31) + * **Documentation**: Documentation updates for Amazon S3 +* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.14.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1140-2023-03-31) + * **Feature**: In this release, you can now chose between soft delete and hard delete when calling the DeleteRecord API, so you have more flexibility when it comes to managing online store data. + +# Release (2023-03-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.25.0](service/athena/CHANGELOG.md#v1250-2023-03-30) + * **Feature**: Make DefaultExecutorDpuSize and CoordinatorDpuSize fields optional in StartSession +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.28.0](service/autoscaling/CHANGELOG.md#v1280-2023-03-30) + * **Feature**: Amazon EC2 Auto Scaling now supports Elastic Load Balancing traffic sources with the AttachTrafficSources, DetachTrafficSources, and DescribeTrafficSources APIs. This release also introduces a new activity status, "WaitingForConnectionDraining", for VPC Lattice to the DescribeScalingActivities API. +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.23.0](service/batch/CHANGELOG.md#v1230-2023-03-30) + * **Feature**: This feature allows Batch on EKS to support configuration of Pod Labels through Metadata for Batch on EKS Jobs. +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.22.0](service/computeoptimizer/CHANGELOG.md#v1220-2023-03-30) + * **Feature**: This release adds support for HDD EBS volume types and io2 Block Express. We are also adding support for 61 new instance types and instances that have non consecutive runtime. +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.12.0](service/drs/CHANGELOG.md#v1120-2023-03-30) + * **Feature**: Adding a field to the replication configuration APIs to support the auto replicate new disks feature. We also deprecated RetryDataReplication. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.92.0](service/ec2/CHANGELOG.md#v1920-2023-03-30) + * **Feature**: This release adds support for Tunnel Endpoint Lifecycle control, a new feature that provides Site-to-Site VPN customers with better visibility and control of their VPN tunnel maintenance updates. +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.24.0](service/emr/CHANGELOG.md#v1240-2023-03-30) + * **Feature**: Updated DescribeCluster and ListClusters API responses to include ErrorDetail that specifies error code, programmatically accessible error data,and an error message. ErrorDetail provides the underlying reason for cluster failure and recommends actions to simplify troubleshooting of EMR clusters. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.44.0](service/glue/CHANGELOG.md#v1440-2023-03-30) + * **Feature**: This release adds support for AWS Glue Data Quality, which helps you evaluate and monitor the quality of your data and includes the API for creating, deleting, or updating data quality rulesets, runs and evaluations. 
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.20.0](service/guardduty/CHANGELOG.md#v1200-2023-03-30) + * **Feature**: Added EKS Runtime Monitoring feature support to existing detector, finding APIs and introducing new Coverage APIs +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.23.0](service/imagebuilder/CHANGELOG.md#v1230-2023-03-30) + * **Feature**: Adds support for new image workflow details and image vulnerability detection. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.21.0](service/ivs/CHANGELOG.md#v1210-2023-03-30) + * **Feature**: Amazon Interactive Video Service (IVS) now offers customers the ability to configure IVS channels to allow insecure RTMP ingest. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.39.0](service/kendra/CHANGELOG.md#v1390-2023-03-30) + * **Feature**: AWS Kendra now supports featured results for a query. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.25.0](service/networkfirewall/CHANGELOG.md#v1250-2023-03-30) + * **Feature**: AWS Network Firewall added TLS inspection configurations to allow TLS traffic inspection. +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.2.0](service/sagemakergeospatial/CHANGELOG.md#v120-2023-03-30) + * **Feature**: Amazon SageMaker geospatial capabilities now supports server-side encryption with customer managed KMS key and SageMaker notebooks with a SageMaker geospatial image in a Amazon SageMaker Domain with VPC only mode. +* `github.com/aws/aws-sdk-go-v2/service/vpclattice`: [v1.0.0](service/vpclattice/CHANGELOG.md#v100-2023-03-30) + * **Release**: New AWS service client module + * **Feature**: General Availability (GA) release of Amazon VPC Lattice +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.19.0](service/wellarchitected/CHANGELOG.md#v1190-2023-03-30) + * **Feature**: AWS Well-Architected SDK now supports getting consolidated report metrics and generating a consolidated report PDF. + +# Release (2023-03-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.2.0](service/opensearchserverless/CHANGELOG.md#v120-2023-03-29) + * **Feature**: This release includes two new exception types "ServiceQuotaExceededException" and "OcuLimitExceededException". +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.42.0](service/rds/CHANGELOG.md#v1420-2023-03-29) + * **Feature**: Add support for creating a read replica DB instance from a Multi-AZ DB cluster. + +# Release (2023-03-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.15.0](service/ssmcontacts/CHANGELOG.md#v1150-2023-03-28) + * **Feature**: This release adds 12 new APIs as part of Oncall Schedule feature release, adds support for a new contact type: ONCALL_SCHEDULE. Check public documentation for AWS ssm-contacts for more information +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.21.0](service/ssmincidents/CHANGELOG.md#v1210-2023-03-28) + * **Feature**: Increased maximum length of "TriggerDetails.rawData" to 10K characters and "IncidentSummary" to 8K characters. + +# Release (2023-03-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.24.0](service/athena/CHANGELOG.md#v1240-2023-03-27) + * **Feature**: Enforces a minimal level of encryption for the workgroup for query and calculation results that are written to Amazon S3. When enabled, workgroup users can set encryption only to the minimum level set by the administrator or higher when they submit queries. 
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.4.0](service/chimesdkvoice/CHANGELOG.md#v140-2023-03-27) + * **Feature**: Documentation updates for Amazon Chime SDK Voice. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.50.0](service/connect/CHANGELOG.md#v1500-2023-03-27) + * **Feature**: This release introduces support for RelatedContactId in the StartChatContact API. Interactive message and interactive message response have been added to the list of supported message content types for this API as well. +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.7](service/connectparticipant/CHANGELOG.md#v1157-2023-03-27) + * **Documentation**: This release provides an update to the SendMessage API to handle interactive message response content-types. +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.26.0](service/iotwireless/CHANGELOG.md#v1260-2023-03-27) + * **Feature**: Introducing new APIs that enable Sidewalk devices to communicate with AWS IoT Core through Sidewalk gateways. This will empower AWS customers to connect Sidewalk devices with other AWS IoT Services, creating possibilities for seamless integration and advanced device management. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.31.0](service/medialive/CHANGELOG.md#v1310-2023-03-27) + * **Feature**: AWS Elemental MediaLive now supports ID3 tag insertion for audio only HLS output groups. AWS Elemental Link devices now support tagging. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.72.1](service/sagemaker/CHANGELOG.md#v1721-2023-03-27) + * **Documentation**: Fixed some improperly rendered links in SDK documentation. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.30.0](service/securityhub/CHANGELOG.md#v1300-2023-03-27) + * **Feature**: Added new resource detail objects to ASFF, including resources for AwsEksCluster, AWSS3Bucket, AwsEc2RouteTable and AwsEC2Instance. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.17.0](service/servicecatalogappregistry/CHANGELOG.md#v1170-2023-03-27) + * **Feature**: In this release, we started supporting ARN in applicationSpecifier and attributeGroupSpecifier. GetAttributeGroup, ListAttributeGroups and ListAttributeGroupsForApplication APIs will now have CreatedBy field in the response. +* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.13.0](service/voiceid/CHANGELOG.md#v1130-2023-03-27) + * **Feature**: Amazon Connect Voice ID now supports multiple fraudster watchlists. Every domain has a default watchlist where all existing fraudsters are placed by default. Custom watchlists may now be created, managed, and evaluated against for known fraudster detection. + +# Release (2023-03-24) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.7](service/cloudwatch/CHANGELOG.md#v1257-2023-03-24) + * **Documentation**: Doc-only update to correct alarm actions list +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.23.0](service/comprehend/CHANGELOG.md#v1230-2023-03-24) + * **Feature**: This release adds a new field (FlywheelArn) to the EntitiesDetectionJobProperties object. The FlywheelArn field is returned in the DescribeEntitiesDetectionJob and ListEntitiesDetectionJobs responses when the EntitiesDetection job is started with a FlywheelArn instead of an EntityRecognizerArn . 
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.41.0](service/rds/CHANGELOG.md#v1410-2023-03-24) + * **Feature**: Added error code CreateCustomDBEngineVersionFault for when the create custom engine version for Custom engines fails. + +# Release (2023-03-23) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.22.0](service/batch/CHANGELOG.md#v1220-2023-03-23) + * **Feature**: This feature allows Batch to support configuration of ephemeral storage size for jobs running on FARGATE +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.11.0](service/chimesdkidentity/CHANGELOG.md#v1110-2023-03-23) + * **Feature**: AppInstanceBots can be used to add a bot powered by Amazon Lex to chat channels. ExpirationSettings provides automatic resource deletion for AppInstanceUsers. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.3.0](service/chimesdkmediapipelines/CHANGELOG.md#v130-2023-03-23) + * **Feature**: This release adds Amazon Chime SDK call analytics. Call analytics include voice analytics, which provides speaker search and voice tone analysis. These capabilities can be used with Amazon Transcribe and Transcribe Call Analytics to generate machine-learning-powered insights from real-time audio. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.14.0](service/chimesdkmessaging/CHANGELOG.md#v1140-2023-03-23) + * **Feature**: ExpirationSettings provides automatic resource deletion for Channels. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.3.0](service/chimesdkvoice/CHANGELOG.md#v130-2023-03-23) + * **Feature**: This release adds Amazon Chime SDK call analytics. Call analytics include voice analytics, which provides speaker search and voice tone analysis. These capabilities can be used with Amazon Transcribe and Transcribe Call Analytics to generate machine-learning-powered insights from real-time audio. +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.18.0](service/codeartifact/CHANGELOG.md#v1180-2023-03-23) + * **Feature**: Repository CreationTime is added to the CreateRepository and ListRepositories API responses. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.19.0](service/guardduty/CHANGELOG.md#v1190-2023-03-23) + * **Feature**: Adds AutoEnableOrganizationMembers attribute to DescribeOrganizationConfiguration and UpdateOrganizationConfiguration APIs. +* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.0.0](service/ivsrealtime/CHANGELOG.md#v100-2023-03-23) + * **Release**: New AWS service client module + * **Feature**: Initial release of the Amazon Interactive Video Service RealTime API. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.34.0](service/mediaconvert/CHANGELOG.md#v1340-2023-03-23) + * **Feature**: AWS Elemental MediaConvert SDK now supports passthrough of ID3v2 tags for audio inputs to audio-only HLS outputs. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.72.0](service/sagemaker/CHANGELOG.md#v1720-2023-03-23) + * **Feature**: Amazon SageMaker Autopilot adds two new APIs - CreateAutoMLJobV2 and DescribeAutoMLJobV2. Amazon SageMaker Notebook Instances now supports the ml.geospatial.interactive instance type. +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.21.0](service/servicediscovery/CHANGELOG.md#v1210-2023-03-23) + * **Feature**: Reverted the throttling exception RequestLimitExceeded for AWS Cloud Map APIs introduced in SDK version 1.12.424 2023-03-09 to previous exception specified in the ErrorCode. 
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.21.0](service/textract/CHANGELOG.md#v1210-2023-03-23) + * **Feature**: The AnalyzeDocument - Tables feature adds support for new elements in the API: table titles, footers, section titles, summary cells/tables, and table type. + +# Release (2023-03-22) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.8](service/iam/CHANGELOG.md#v1198-2023-03-22) + * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.11.0](service/iottwinmaker/CHANGELOG.md#v1110-2023-03-22) + * **Feature**: This release adds support of adding metadata when creating a new scene or updating an existing scene. +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.8](service/networkmanager/CHANGELOG.md#v1178-2023-03-22) + * **Documentation**: This release includes an update to create-transit-gateway-route-table-attachment, showing example usage for TransitGatewayRouteTableArn. +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.10.0](service/resiliencehub/CHANGELOG.md#v1100-2023-03-22) + * **Feature**: This release provides customers with the ability to import resources from within an EKS cluster and assess the resiliency of EKS cluster workloads. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.36.0](service/ssm/CHANGELOG.md#v1360-2023-03-22) + * **Feature**: This Patch Manager release supports creating, updating, and deleting Patch Baselines for AmazonLinux2023, AlmaLinux. + +# Release (2023-03-21) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.13.0](service/chimesdkmessaging/CHANGELOG.md#v1130-2023-03-21) + * **Feature**: Amazon Chime SDK messaging customers can now manage streaming configuration for messaging data for archival and analysis. +* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.1.0](service/cleanrooms/CHANGELOG.md#v110-2023-03-21) + * **Feature**: GA Release of AWS Clean Rooms, Added Tagging Functionality +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.91.0](service/ec2/CHANGELOG.md#v1910-2023-03-21) + * **Feature**: This release adds support for AWS Network Firewall, AWS PrivateLink, and Gateway Load Balancers to Amazon VPC Reachability Analyzer, and it makes the path destination optional as long as a destination address in the filter at source is provided. 
+* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.14.0](service/internal/s3shared/CHANGELOG.md#v1140-2023-03-21) + * **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.28.0](service/iotsitewise/CHANGELOG.md#v1280-2023-03-21) + * **Feature**: Provide support for tagging of data streams and enabling tag based authorization for property alias +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.18.0](service/mgn/CHANGELOG.md#v1180-2023-03-21) + * **Feature**: This release introduces the Import and export feature and expansion of the post-launch actions +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.31.0](service/s3/CHANGELOG.md#v1310-2023-03-21) + * **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config + +# Release (2023-03-20) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.19.0](service/applicationautoscaling/CHANGELOG.md#v1190-2023-03-20) + * **Feature**: With this release customers can now tag their Application Auto Scaling registered targets with key-value pairs and manage IAM permissions for all the tagged resources centrally. +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.20.0](service/neptune/CHANGELOG.md#v1200-2023-03-20) + * **Feature**: This release makes following few changes. db-cluster-identifier is now a required parameter of create-db-instance. describe-db-cluster will now return PendingModifiedValues and GlobalClusterIdentifier fields in the response. +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.16.0](service/s3outposts/CHANGELOG.md#v1160-2023-03-20) + * **Feature**: S3 On Outposts added support for endpoint status, and a failed endpoint reason, if any +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.14.0](service/workdocs/CHANGELOG.md#v1140-2023-03-20) + * **Feature**: This release adds a new API, SearchResources, which enable users to search through metadata and content of folders, documents, document versions and comments in a WorkDocs site. + +# Release (2023-03-17) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.6.0](service/billingconductor/CHANGELOG.md#v160-2023-03-17) + * **Feature**: This release adds a new filter to ListAccountAssociations API and a new filter to ListBillingGroups API. +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.30.0](service/configservice/CHANGELOG.md#v1300-2023-03-17) + * **Feature**: This release adds resourceType enums for types released from October 2022 through February 2023. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.25.0](service/databasemigrationservice/CHANGELOG.md#v1250-2023-03-17) + * **Feature**: S3 setting to create AWS Glue Data Catalog. Oracle setting to control conversion of timestamp column. Support for Kafka SASL Plain authentication. Setting to map boolean from PostgreSQL to Redshift. SQL Server settings to force lob lookup on inline LOBs and to control access of database logs. + +# Release (2023-03-16) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.18](config/CHANGELOG.md#v11818-2023-03-16) + * **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. 
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.18.0](service/guardduty/CHANGELOG.md#v1180-2023-03-16) + * **Feature**: Updated 9 APIs for feature enablement to reflect expansion of GuardDuty to features. Added new APIs and updated existing APIs to support RDS Protection GA. +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.7](service/resourceexplorer2/CHANGELOG.md#v127-2023-03-16) + * **Documentation**: Documentation updates for APIs. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.7](service/sagemakerruntime/CHANGELOG.md#v1187-2023-03-16) + * **Documentation**: Documentation updates for SageMaker Runtime + +# Release (2023-03-15) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.9.0](service/migrationhubstrategy/CHANGELOG.md#v190-2023-03-15) + * **Feature**: This release adds the binary analysis that analyzes IIS application DLLs on Windows and Java applications on Linux to provide anti-pattern report without configuring access to the source code. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.31.0](service/s3control/CHANGELOG.md#v1310-2023-03-15) + * **Feature**: Added support for S3 Object Lambda aliases. +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.3.0](service/securitylake/CHANGELOG.md#v130-2023-03-15) + * **Feature**: Make Create/Get/ListSubscribers APIs return resource share ARN and name so they can be used to validate the RAM resource share to accept. GetDatalake can be used to track status of UpdateDatalake and DeleteDatalake requests. + +# Release (2023-03-14) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.13.0](feature/ec2/imds/CHANGELOG.md#v1130-2023-03-14) + * **Feature**: Add flag to disable IMDSv1 fallback +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.18.0](service/applicationautoscaling/CHANGELOG.md#v1180-2023-03-14) + * **Feature**: Application Auto Scaling customers can now use mathematical functions to customize the metric used with Target Tracking policies within the policy configuration itself, saving the cost and effort of publishing the customizations as a separate metric. +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.19.0](service/dataexchange/CHANGELOG.md#v1190-2023-03-14) + * **Feature**: This release enables data providers to license direct access to S3 objects encrypted with Customer Managed Keys (CMK) in AWS KMS through AWS Data Exchange. Subscribers can use these keys to decrypt, then use the encrypted S3 objects shared with them, without creating or managing copies. +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.7](service/directconnect/CHANGELOG.md#v1187-2023-03-14) + * **Documentation**: describe-direct-connect-gateway-associations includes a new status, updating, indicating that the association is currently in-process of updating. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.90.0](service/ec2/CHANGELOG.md#v1900-2023-03-14) + * **Feature**: This release adds a new DnsOptions key (PrivateDnsOnlyForInboundResolverEndpoint) to CreateVpcEndpoint and ModifyVpcEndpoint APIs. 
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.6](service/iam/CHANGELOG.md#v1196-2023-03-14) + * **Documentation**: Documentation only updates to correct customer-reported issues +* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.2.0](service/keyspaces/CHANGELOG.md#v120-2023-03-14) + * **Feature**: Adding support for client-side timestamps +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.6](service/support/CHANGELOG.md#v1146-2023-03-14) + * **Announcement**: Model regenerated with support for null string values to properly implement `support` service operations `DescribeTrustedAdvisorCheckRefreshStatuses` and `DescribeTrustedAdvisorCheckSummaries` + +# Release (2023-03-13) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.15.0](service/appintegrations/CHANGELOG.md#v1150-2023-03-13) + * **Feature**: Adds FileConfiguration to Amazon AppIntegrations CreateDataIntegration supporting scheduled downloading of third party files into Amazon Connect from sources such as Microsoft SharePoint. +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.20.2](service/lakeformation/CHANGELOG.md#v1202-2023-03-13) + * **Documentation**: This release updates the documentation regarding Get/Update DataCellsFilter +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.30.0](service/s3control/CHANGELOG.md#v1300-2023-03-13) + * **Feature**: Added support for cross-account Multi-Region Access Points. Added support for S3 Replication for S3 on Outposts. +* `github.com/aws/aws-sdk-go-v2/service/tnb`: [v1.1.0](service/tnb/CHANGELOG.md#v110-2023-03-13) + * **Feature**: This release adds tagging support to the following Network Instance APIs : Instantiate, Update, Terminate. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.13.0](service/wisdom/CHANGELOG.md#v1130-2023-03-13) + * **Feature**: This release extends Wisdom CreateKnowledgeBase API to support SharePoint connector type by removing the @required trait for objectField + +# Release (2023-03-10) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.4.0](service/ivschat/CHANGELOG.md#v140-2023-03-10) + * **Feature**: This release adds a new exception returned when calling AWS IVS chat UpdateLoggingConfiguration. Now UpdateLoggingConfiguration can return ConflictException when invalid updates are made in sequence to Logging Configurations. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.19.0](service/secretsmanager/CHANGELOG.md#v1190-2023-03-10) + * **Feature**: The type definitions of SecretString and SecretBinary now have a minimum length of 1 in the model to match the exception thrown when you pass in empty values. + +# Release (2023-03-09) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.17.0](service/codeartifact/CHANGELOG.md#v1170-2023-03-09) + * **Feature**: This release introduces the generic package format, a mechanism for storing arbitrary binary assets. It also adds a new API, PublishPackageVersion, to allow for publishing generic packages. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.49.0](service/connect/CHANGELOG.md#v1490-2023-03-09) + * **Feature**: This release adds a new API, GetMetricDataV2, which returns metric data for Amazon Connect. 
+* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.11.0](service/evidently/CHANGELOG.md#v1110-2023-03-09) + * **Feature**: Updated entity override documentation +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.5](service/networkmanager/CHANGELOG.md#v1175-2023-03-09) + * **Documentation**: This update provides example usage for TransitGatewayRouteTableArn. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.33.0](service/quicksight/CHANGELOG.md#v1330-2023-03-09) + * **Feature**: This release has two changes: add state persistence feature for embedded dashboard and console in GenerateEmbedUrlForRegisteredUser API; add properties for hidden collapsed row dimensions in PivotTableOptions. +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.19.0](service/redshiftdata/CHANGELOG.md#v1190-2023-03-09) + * **Feature**: Added support for Redshift Serverless workgroup-arn wherever the WorkgroupName parameter is available. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.71.0](service/sagemaker/CHANGELOG.md#v1710-2023-03-09) + * **Feature**: Amazon SageMaker Inference now allows SSM access to customer's model container by setting the "EnableSSMAccess" parameter for a ProductionVariant in CreateEndpointConfig API. +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.20.0](service/servicediscovery/CHANGELOG.md#v1200-2023-03-09) + * **Feature**: Updated all AWS Cloud Map APIs to provide consistent throttling exception (RequestLimitExceeded) +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.17.0](service/sesv2/CHANGELOG.md#v1170-2023-03-09) + * **Feature**: This release introduces a new recommendation in Virtual Deliverability Manager Advisor, which detects missing or misconfigured Brand Indicator for Message Identification (BIMI) DNS records for customer sending identities. + +# Release (2023-03-08) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.23.0](service/athena/CHANGELOG.md#v1230-2023-03-08) + * **Feature**: A new field SubstatementType is added to GetQueryExecution API, so customers have an error free way to detect the query type and interpret the result. +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.19.0](service/dynamodb/CHANGELOG.md#v1190-2023-03-08) + * **Feature**: Adds deletion protection support to DynamoDB tables. Tables with deletion protection enabled cannot be deleted. Deletion protection is disabled by default, can be enabled via the CreateTable or UpdateTable APIs, and is visible in TableDescription. This setting is not replicated for Global Tables. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.89.0](service/ec2/CHANGELOG.md#v1890-2023-03-08) + * **Feature**: Introducing Amazon EC2 C7g, M7g and R7g instances, powered by the latest generation AWS Graviton3 processors and deliver up to 25% better performance over Graviton2-based instances. +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.20.0](service/lakeformation/CHANGELOG.md#v1200-2023-03-08) + * **Feature**: This release adds two new API support "GetDataCellsFiler" and "UpdateDataCellsFilter", and also updates the corresponding documentation. +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.21.0](service/mediapackage/CHANGELOG.md#v1210-2023-03-08) + * **Feature**: This release provides the date and time live resources were created. 
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.22.0](service/mediapackagevod/CHANGELOG.md#v1220-2023-03-08) + * **Feature**: This release provides the date and time VOD resources were created. +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.17.0](service/route53resolver/CHANGELOG.md#v1170-2023-03-08) + * **Feature**: Add dual-stack and IPv6 support for Route 53 Resolver Endpoint,Add IPv6 target IP in Route 53 Resolver Forwarding Rule +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.70.0](service/sagemaker/CHANGELOG.md#v1700-2023-03-08) + * **Feature**: There needs to be a user identity to specify the SageMaker user who perform each action regarding the entity. However, these is a not a unified concept of user identity across SageMaker service that could be used today. + +# Release (2023-03-07) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.24.0](service/databasemigrationservice/CHANGELOG.md#v1240-2023-03-07) + * **Feature**: This release adds DMS Fleet Advisor Target Recommendation APIs and exposes functionality for DMS Fleet Advisor. It adds functionality to start Target Recommendation calculation. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.22.1](service/location/CHANGELOG.md#v1221-2023-03-07) + * **Documentation**: Documentation update for the release of 3 additional map styles for use with Open Data Maps: Open Data Standard Dark, Open Data Visualization Light & Open Data Visualization Dark. + +# Release (2023-03-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.10.0](service/account/CHANGELOG.md#v1100-2023-03-06) + * **Feature**: AWS Account alternate contact email addresses can now have a length of 254 characters and contain the character "|". +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.6](service/ivs/CHANGELOG.md#v1206-2023-03-06) + * **Documentation**: Updated text description in DeleteChannel, Stream, and StreamSummary. + +# Release (2023-03-03) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.6](service/dynamodb/CHANGELOG.md#v1186-2023-03-03) + * **Documentation**: Documentation updates for DynamoDB. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.88.0](service/ec2/CHANGELOG.md#v1880-2023-03-03) + * **Feature**: This release adds support for a new boot mode for EC2 instances called 'UEFI Preferred'. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.27.1](service/macie2/CHANGELOG.md#v1271-2023-03-03) + * **Documentation**: Documentation updates for Amazon Macie +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.33.0](service/mediaconvert/CHANGELOG.md#v1330-2023-03-03) + * **Feature**: The AWS Elemental MediaConvert SDK has improved handling for different input and output color space combinations. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.30.0](service/medialive/CHANGELOG.md#v1300-2023-03-03) + * **Feature**: AWS Elemental MediaLive adds support for Nielsen watermark timezones. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.26.0](service/transcribe/CHANGELOG.md#v1260-2023-03-03) + * **Feature**: Amazon Transcribe now supports role access for these API operations: CreateVocabulary, UpdateVocabulary, CreateVocabularyFilter, and UpdateVocabularyFilter. 
+ +# Release (2023-03-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.35.0](service/iot/CHANGELOG.md#v1350-2023-03-02) + * **Feature**: A recurring maintenance window is an optional configuration used for rolling out the job document to all devices in the target group observing a predetermined start time, duration, and frequency that the maintenance window occurs. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.8.0](service/migrationhubstrategy/CHANGELOG.md#v180-2023-03-02) + * **Feature**: This release updates the File Import API to allow importing servers already discovered by customers with reduced pre-requisites. +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.19.0](service/organizations/CHANGELOG.md#v1190-2023-03-02) + * **Feature**: This release introduces a new reason code, ACCOUNT_CREATION_NOT_COMPLETE, to ConstraintViolationException in CreateOrganization API. +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.17.0](service/pi/CHANGELOG.md#v1170-2023-03-02) + * **Feature**: This release adds a new field PeriodAlignment to allow the customer specifying the returned timestamp of time periods to be either the start or end time. +* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.2.0](service/pipes/CHANGELOG.md#v120-2023-03-02) + * **Feature**: This release fixes some input parameter range and patterns. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.69.0](service/sagemaker/CHANGELOG.md#v1690-2023-03-02) + * **Feature**: Add a new field "EndpointMetrics" in SageMaker Inference Recommender "ListInferenceRecommendationsJobSteps" API response. + +# Release (2023-03-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.2.0](service/codecatalyst/CHANGELOG.md#v120-2023-03-01) + * **Feature**: Published Dev Environments StopDevEnvironmentSession API +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.19.0](service/pricing/CHANGELOG.md#v1190-2023-03-01) + * **Feature**: This release adds 2 new APIs - ListPriceLists which returns a list of applicable price lists, and GetPriceListFileUrl which outputs a URL to retrieve your price lists from the generated file from ListPriceLists +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.15.0](service/s3outposts/CHANGELOG.md#v1150-2023-03-01) + * **Feature**: S3 on Outposts introduces a new API ListOutpostsWithS3, with this API you can list all your Outposts with S3 capacity. + +# Release (2023-02-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.22.0](service/comprehend/CHANGELOG.md#v1220-2023-02-28) + * **Feature**: Amazon Comprehend now supports flywheels to help you train and manage new model versions for custom models. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.87.0](service/ec2/CHANGELOG.md#v1870-2023-02-28) + * **Feature**: This release allows IMDS support to be set to v2-only on an existing AMI, so that all future instances launched from that AMI will use IMDSv2 by default. +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.6](service/kms/CHANGELOG.md#v1206-2023-02-28) + * **Documentation**: AWS KMS is deprecating the RSAES_PKCS1_V1_5 wrapping algorithm option in the GetParametersForImport API that is used in the AWS KMS Import Key Material feature. AWS KMS will end support for this wrapping algorithm by October 1, 2023. 
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.26.0](service/lightsail/CHANGELOG.md#v1260-2023-02-28) + * **Feature**: This release adds Lightsail for Research feature support, such as GUI session access, cost estimates, stop instance on idle, and disk auto mount. +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.15.0](service/managedblockchain/CHANGELOG.md#v1150-2023-02-28) + * **Feature**: This release adds support for tagging to the accessor resource in Amazon Managed Blockchain +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.2.0](service/omics/CHANGELOG.md#v120-2023-02-28) + * **Feature**: Minor model changes to accomodate batch imports feature + +# Release (2023-02-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.23.0](service/devopsguru/CHANGELOG.md#v1230-2023-02-27) + * **Feature**: This release adds the description field on ListAnomaliesForInsight and DescribeAnomaly API responses for proactive anomalies. +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.11.0](service/drs/CHANGELOG.md#v1110-2023-02-27) + * **Feature**: New fields were added to reflect availability zone data in source server and recovery instance description commands responses, as well as source server launch status. +* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.0.0](service/internetmonitor/CHANGELOG.md#v100-2023-02-27) + * **Release**: New AWS service client module + * **Feature**: CloudWatch Internet Monitor is a a new service within CloudWatch that will help application developers and network engineers continuously monitor internet performance metrics such as availability and performance between their AWS-hosted applications and end-users of these applications +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.30.0](service/lambda/CHANGELOG.md#v1300-2023-02-27) + * **Feature**: This release adds the ability to create ESMs with Document DB change streams as event source. For more information see https://docs.aws.amazon.com/lambda/latest/dg/with-documentdb.html. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.32.0](service/mediaconvert/CHANGELOG.md#v1320-2023-02-27) + * **Feature**: The AWS Elemental MediaConvert SDK has added support for HDR10 to SDR tone mapping, and animated GIF video input sources. +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.16.0](service/timestreamwrite/CHANGELOG.md#v1160-2023-02-27) + * **Feature**: This release adds the ability to ingest batched historical data or migrate data in bulk from S3 into Timestream using CSV files. + +# Release (2023-02-24) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.48.0](service/connect/CHANGELOG.md#v1480-2023-02-24) + * **Feature**: StartTaskContact API now supports linked task creation with a new optional RelatedContactId parameter +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.3.0](service/connectcases/CHANGELOG.md#v130-2023-02-24) + * **Feature**: This release adds the ability to delete domains through the DeleteDomain API. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.5](service/redshift/CHANGELOG.md#v1275-2023-02-24) + * **Documentation**: Documentation updates for Redshift API bringing it in line with IAM best practices. 
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.29.0](service/securityhub/CHANGELOG.md#v1290-2023-02-24) + * **Feature**: New Security Hub APIs and updates to existing APIs that help you consolidate control findings and enable and disable controls across all supported standards +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.5](service/servicecatalog/CHANGELOG.md#v1165-2023-02-24) + * **Documentation**: Documentation updates for Service Catalog + +# Release (2023-02-23) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.25.0](service/appflow/CHANGELOG.md#v1250-2023-02-23) + * **Feature**: This release enables customers to choose whether to use Private Link for Metadata and Authorization calls when using private Salesforce connections +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.24.0](service/ecs/CHANGELOG.md#v1240-2023-02-23) + * **Feature**: This release supports deleting Amazon ECS task definitions that are in the INACTIVE state. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.12.3](service/grafana/CHANGELOG.md#v1123-2023-02-23) + * **Documentation**: Doc-only update. Updated information on attached role policies for customer-provided roles +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.6](service/guardduty/CHANGELOG.md#v1176-2023-02-23) + * **Documentation**: Updated API and data types descriptions for CreateFilter, UpdateFilter, and TriggerDetails. +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.25.0](service/iotwireless/CHANGELOG.md#v1250-2023-02-23) + * **Feature**: In this release, we add additional capabilities for FUOTA which allow users to configure the fragment size, the sending interval, and the redundancy ratio of FUOTA tasks +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.22.0](service/location/CHANGELOG.md#v1220-2023-02-23) + * **Feature**: This release adds support for using Maps APIs with an API Key in addition to AWS Cognito. This includes support for adding, listing, updating and deleting API Keys. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.27.0](service/macie2/CHANGELOG.md#v1270-2023-02-23) + * **Feature**: This release adds support for a new finding type, Policy:IAMUser/S3BucketSharedWithCloudFront, and S3 bucket metadata that indicates if a bucket is shared with an Amazon CloudFront OAI or OAC. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.26.0](service/wafv2/CHANGELOG.md#v1260-2023-02-23) + * **Feature**: You can now associate an AWS WAF v2 web ACL with an AWS App Runner service. + +# Release (2023-02-22) + +## General Highlights +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.2.0](service/chimesdkvoice/CHANGELOG.md#v120-2023-02-22) + * **Feature**: This release introduces support for Voice Connector media metrics in the Amazon Chime SDK Voice namespace +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.26.0](service/cloudfront/CHANGELOG.md#v1260-2023-02-22) + * **Feature**: CloudFront now supports block lists in origin request policies so that you can forward all headers, cookies, or query string from viewer requests to the origin *except* for those specified in the block list. 
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.23.0](service/datasync/CHANGELOG.md#v1230-2023-02-22) + * **Feature**: AWS DataSync has relaxed the minimum length constraint of AccessKey for Object Storage locations to 1. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.15.0](service/opensearch/CHANGELOG.md#v1150-2023-02-22) + * **Feature**: This release lets customers configure Off-peak window and software update related properties for a new/existing domain. It enhances the capabilities of StartServiceSoftwareUpdate API; adds 2 new APIs - ListScheduledActions & UpdateScheduledAction; and allows Auto-tune to make use of Off-peak window. +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.10.0](service/rum/CHANGELOG.md#v1100-2023-02-22) + * **Feature**: CloudWatch RUM now supports CloudWatch Custom Metrics +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.5](service/ssm/CHANGELOG.md#v1355-2023-02-22) + * **Documentation**: Document only update for Feb 2023 + +# Release (2023-02-21) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.32.0](service/quicksight/CHANGELOG.md#v1320-2023-02-21) + * **Feature**: S3 data sources now accept a custom IAM role. +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.9.0](service/resiliencehub/CHANGELOG.md#v190-2023-02-21) + * **Feature**: In this release we improved Resilience Hub application creation and maintenance by introducing new resource and app component CRUD APIs, improving visibility and maintenance of application input sources, and adding support for additional information attributes to be provided by customers. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.4](service/securityhub/CHANGELOG.md#v1284-2023-02-21) + * **Documentation**: Documentation updates for AWS Security Hub +* `github.com/aws/aws-sdk-go-v2/service/tnb`: [v1.0.0](service/tnb/CHANGELOG.md#v100-2023-02-21) + * **Release**: New AWS service client module + * **Feature**: This is the initial SDK release for AWS Telco Network Builder (TNB). AWS Telco Network Builder is a network automation service that helps you deploy and manage telecom networks. + +# Release (2023-02-20) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.17.5 + * **Bug Fix**: Fix an int overflow bug on 32-bit architectures +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.24.0](service/auditmanager/CHANGELOG.md#v1240-2023-02-20) + * **Feature**: This release introduces a ServiceQuotaExceededException to the UpdateAssessmentFrameworkShare API operation. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.47.0](service/connect/CHANGELOG.md#v1470-2023-02-20) + * **Feature**: Reasons for failed diff has been approved by SDK Reviewer + +# Release (2023-02-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.17.0](service/apprunner/CHANGELOG.md#v1170-2023-02-17) + * **Feature**: This release supports removing the MaxSize limit for AutoScalingConfiguration. 
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.43.0](service/glue/CHANGELOG.md#v1430-2023-02-17) + * **Feature**: Release of Delta Lake Data Lake Format for Glue Studio Service + +# Release (2023-02-16) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.23.0](service/emr/CHANGELOG.md#v1230-2023-02-16) + * **Feature**: This release provides customers the ability to define a timeout period for procuring capacity during a resize operation for Instance Fleet clusters. Customers can specify this timeout using the ResizeSpecifications parameter supported by RunJobFlow, ModifyInstanceFleet and AddInstanceFleet APIs. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.12.0](service/grafana/CHANGELOG.md#v1120-2023-02-16) + * **Feature**: With this release, Amazon Managed Grafana now supports inbound Network Access Control that helps you restrict user access to your Grafana workspaces +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.3](service/ivs/CHANGELOG.md#v1203-2023-02-16) + * **Documentation**: Doc-only update. Updated text description in DeleteChannel, Stream, and StreamSummary. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.25.1](service/wafv2/CHANGELOG.md#v1251-2023-02-16) + * **Documentation**: Added a notice for account takeover prevention (ATP). The interface incorrectly lets you configure ATP response inspection in regional web ACLs in Region US East (N. Virginia), without returning an error. ATP response inspection is only available in web ACLs that protect CloudFront distributions. + +# Release (2023-02-15) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.19.3](service/accessanalyzer/CHANGELOG.md#v1193-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.9.1](service/account/CHANGELOG.md#v191-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.17.3](service/acm/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.21.2](service/acmpca/CHANGELOG.md#v1212-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
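+
+A minimal sketch, assuming an IAM Access Analyzer client and a placeholder analyzer name, of the `errors.As` pattern affected by the restJson error-parsing fix noted above; callers only take the modeled-error branch when the SDK deserializes the correct error type:
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/accessanalyzer"
+	"github.com/aws/aws-sdk-go-v2/service/accessanalyzer/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := accessanalyzer.NewFromConfig(cfg)
+
+	// "example-analyzer" is a placeholder name, not a real resource.
+	_, err = client.GetAnalyzer(context.TODO(), &accessanalyzer.GetAnalyzerInput{
+		AnalyzerName: aws.String("example-analyzer"),
+	})
+
+	// Branch on the modeled error type; before the fix, a restJson error
+	// response could deserialize into the wrong type, so this branch might
+	// not be taken even when the service returned a not-found error.
+	var nfe *types.ResourceNotFoundException
+	if errors.As(err, &nfe) {
+		log.Printf("analyzer not found: %s", nfe.ErrorMessage())
+	} else if err != nil {
+		log.Printf("other error: %v", err)
+	}
+}
+```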
+* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.15.2](service/alexaforbusiness/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.16.2](service/amp/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.13.2](service/amplify/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.14.2](service/amplifybackend/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.9.2](service/amplifyuibuilder/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.16.3](service/apigateway/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.11.2](service/apigatewaymanagementapi/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.13.3](service/apigatewayv2/CHANGELOG.md#v1133-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.17.1](service/appconfig/CHANGELOG.md#v1171-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.6.1](service/appconfigdata/CHANGELOG.md#v161-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.24.2](service/appflow/CHANGELOG.md#v1242-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.14.2](service/appintegrations/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.17.3](service/applicationautoscaling/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.10.2](service/applicationcostprofiler/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.15.2](service/applicationdiscoveryservice/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.17.3](service/applicationinsights/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.17.2](service/appmesh/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.16.2](service/apprunner/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.20.2](service/appstream/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.19.2](service/appsync/CHANGELOG.md#v1192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.1.3](service/arczonalshift/CHANGELOG.md#v113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.22.2](service/athena/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.23.2](service/auditmanager/CHANGELOG.md#v1232-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.13.2](service/autoscalingplans/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.20.1](service/backup/CHANGELOG.md#v1201-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.9.2](service/backupgateway/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.1.2](service/backupstorage/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.21.3](service/batch/CHANGELOG.md#v1213-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.5.2](service/billingconductor/CHANGELOG.md#v152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.17.2](service/braket/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.14.2](service/budgets/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.22.2](service/chime/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.10.2](service/chimesdkidentity/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.2.2](service/chimesdkmediapipelines/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.3](service/chimesdkmeetings/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.12.2](service/chimesdkmessaging/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.1.2](service/chimesdkvoice/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.0.2](service/cleanrooms/CHANGELOG.md#v102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.17.2](service/cloud9/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.11.3](service/cloudcontrol/CHANGELOG.md#v1113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.13.2](service/clouddirectory/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.13.2](service/cloudhsm/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.14.2](service/cloudhsmv2/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.12.2](service/cloudsearchdomain/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.24.0](service/cloudtrail/CHANGELOG.md#v1240-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: This release adds an InsufficientEncryptionPolicyException type to the StartImport endpoint + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudtraildata`: [v1.0.2](service/cloudtraildata/CHANGELOG.md#v102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.15.3](service/cloudwatchevents/CHANGELOG.md#v1153-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.20.3](service/cloudwatchlogs/CHANGELOG.md#v1203-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.16.2](service/codeartifact/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.20.3](service/codebuild/CHANGELOG.md#v1203-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.1.2](service/codecatalyst/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.14.2](service/codecommit/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.16.3](service/codedeploy/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.13.2](service/codeguruprofiler/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.17.2](service/codegurureviewer/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.14.2](service/codepipeline/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.13.2](service/codestar/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.14.2](service/codestarconnections/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.14.2](service/codestarnotifications/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.15.2](service/cognitoidentity/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.22.2](service/cognitoidentityprovider/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.12.2](service/cognitosync/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.21.2](service/comprehend/CHANGELOG.md#v1212-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.15.2](service/comprehendmedical/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.21.1](service/computeoptimizer/CHANGELOG.md#v1211-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.29.3](service/configservice/CHANGELOG.md#v1293-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.46.1](service/connect/CHANGELOG.md#v1461-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.2.3](service/connectcampaigns/CHANGELOG.md#v123-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.2.3](service/connectcases/CHANGELOG.md#v123-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.13.2](service/connectcontactlens/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.2](service/connectparticipant/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.1.2](service/controltower/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.15.2](service/costandusagereportservice/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.25.2](service/costexplorer/CHANGELOG.md#v1252-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.23.1](service/customerprofiles/CHANGELOG.md#v1231-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.23.3](service/databasemigrationservice/CHANGELOG.md#v1233-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.21.3](service/databrew/CHANGELOG.md#v1213-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.18.2](service/dataexchange/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.14.2](service/datapipeline/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.22.1](service/datasync/CHANGELOG.md#v1221-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.12.2](service/dax/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.18.2](service/detective/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.15.2](service/devicefarm/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.22.2](service/devopsguru/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.3](service/directconnect/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.16.3](service/directoryservice/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.14.4](service/dlm/CHANGELOG.md#v1144-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.1.2](service/docdbelastic/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.10.2](service/drs/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.3](service/dynamodb/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.14.3](service/dynamodbstreams/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.16.4](service/ebs/CHANGELOG.md#v1164-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.15.2](service/ec2instanceconnect/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.18.3](service/ecr/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.15.2](service/ecrpublic/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.23.3](service/ecs/CHANGELOG.md#v1233-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.4](service/efs/CHANGELOG.md#v1194-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. + * **Documentation**: Documentation update for EFS to support IAM best practices. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.27.3](service/eks/CHANGELOG.md#v1273-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.12.2](service/elasticinference/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.18.3](service/elasticsearchservice/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.14.2](service/elastictranscoder/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.22.3](service/emr/CHANGELOG.md#v1223-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.17.1](service/emrcontainers/CHANGELOG.md#v1171-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.5.2](service/emrserverless/CHANGELOG.md#v152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.18.3](service/eventbridge/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.10.2](service/evidently/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.9.2](service/finspace/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.14.2](service/finspacedata/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.16.3](service/firehose/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.14.2](service/fis/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.22.3](service/fms/CHANGELOG.md#v1223-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.25.2](service/forecast/CHANGELOG.md#v1252-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.13.2](service/forecastquery/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.23.0](service/frauddetector/CHANGELOG.md#v1230-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: This release introduces the Lists feature, which allows customers to reference a set of values in Fraud Detector's rules. With Lists, customers can dynamically manage these attributes in real time. Lists can be created/deleted and their contents can be modified using the Fraud Detector API. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.3](service/fsx/CHANGELOG.md#v1283-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.17.2](service/gamelift/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.2.2](service/gamesparks/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.14.3](service/glacier/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.16.2](service/globalaccelerator/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.42.0](service/glue/CHANGELOG.md#v1420-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: Fix DirectJDBCSource not showing up in CLI code gen + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.11.2](service/grafana/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.15.3](service/greengrass/CHANGELOG.md#v1153-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.21.3](service/greengrassv2/CHANGELOG.md#v1213-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.17.2](service/groundstation/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.3](service/guardduty/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.16.2](service/health/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.15.2](service/healthlake/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.13.2](service/honeycode/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.2](service/identitystore/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.22.2](service/imagebuilder/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.13.2](service/inspector/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.11.3](service/inspector2/CHANGELOG.md#v1113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.34.2](service/iot/CHANGELOG.md#v1342-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.11.2](service/iot1clickdevicesservice/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.12.2](service/iot1clickprojects/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.14.2](service/iotanalytics/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.14.2](service/iotdataplane/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.17.2](service/iotdeviceadvisor/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.15.2](service/iotevents/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.13.2](service/ioteventsdata/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.13.2](service/iotfleethub/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.3.2](service/iotfleetwise/CHANGELOG.md#v132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.12.2](service/iotjobsdataplane/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.1.2](service/iotroborunner/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.15.2](service/iotsecuretunneling/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.27.2](service/iotsitewise/CHANGELOG.md#v1272-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.14.2](service/iotthingsgraph/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.10.2](service/iottwinmaker/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.24.2](service/iotwireless/CHANGELOG.md#v1242-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.2](service/ivs/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.3.2](service/ivschat/CHANGELOG.md#v132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.19.2](service/kafka/CHANGELOG.md#v1192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.9.2](service/kafkaconnect/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.38.3](service/kendra/CHANGELOG.md#v1383-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kendraranking`: [v1.0.4](service/kendraranking/CHANGELOG.md#v104-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.1.2](service/keyspaces/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
+ * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.17.4](service/kinesis/CHANGELOG.md#v1174-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.14.2](service/kinesisanalytics/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.16.2](service/kinesisanalyticsv2/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.15.3](service/kinesisvideo/CHANGELOG.md#v1153-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.14.3](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.11.3](service/kinesisvideomedia/CHANGELOG.md#v1113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.11.3](service/kinesisvideosignaling/CHANGELOG.md#v1113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.2.3](service/kinesisvideowebrtcstorage/CHANGELOG.md#v123-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.3](service/kms/CHANGELOG.md#v1203-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.19.2](service/lakeformation/CHANGELOG.md#v1192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.29.2](service/lambda/CHANGELOG.md#v1292-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.17.2](service/lexmodelbuildingservice/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.28.1](service/lexmodelsv2/CHANGELOG.md#v1281-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.13.2](service/lexruntimeservice/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.17.1](service/lexruntimev2/CHANGELOG.md#v1171-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.17.2](service/licensemanager/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.1.2](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.2.2](service/licensemanagerusersubscriptions/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.3](service/lightsail/CHANGELOG.md#v1253-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.21.2](service/location/CHANGELOG.md#v1212-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.17.2](service/lookoutequipment/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.19.2](service/lookoutmetrics/CHANGELOG.md#v1192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.15.2](service/lookoutvision/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.4.2](service/m2/CHANGELOG.md#v142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.15.2](service/machinelearning/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.15.2](service/macie/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.26.2](service/macie2/CHANGELOG.md#v1262-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.14.2](service/managedblockchain/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.15.2](service/marketplacecatalog/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.12.2](service/marketplacecommerceanalytics/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.12.2](service/marketplaceentitlementservice/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.14.3](service/marketplacemetering/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.18.2](service/mediaconnect/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.31.1](service/mediaconvert/CHANGELOG.md#v1311-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.29.2](service/medialive/CHANGELOG.md#v1292-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.20.2](service/mediapackage/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.21.2](service/mediapackagevod/CHANGELOG.md#v1212-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.13.2](service/mediastore/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.13.2](service/mediastoredata/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.22.2](service/mediatailor/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.12.2](service/memorydb/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.17.2](service/mgn/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.13.2](service/migrationhub/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.13.2](service/migrationhubconfig/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.1.2](service/migrationhuborchestrator/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.9.1](service/migrationhubrefactorspaces/CHANGELOG.md#v191-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.7.2](service/migrationhubstrategy/CHANGELOG.md#v172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.12.2](service/mobile/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.14.2](service/mq/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.14.2](service/mturk/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.14.2](service/mwaa/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.24.2](service/networkfirewall/CHANGELOG.md#v1242-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.2](service/networkmanager/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.16.2](service/nimble/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.1.3](service/oam/CHANGELOG.md#v113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.1.2](service/omics/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.14.2](service/opensearch/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
+ * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.1.3](service/opensearchserverless/CHANGELOG.md#v113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.14.2](service/opsworks/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.15.2](service/opsworkscm/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.18.2](service/organizations/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.27.2](service/outposts/CHANGELOG.md#v1272-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.11.2](service/panorama/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.23.2](service/personalize/CHANGELOG.md#v1232-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.13.2](service/personalizeevents/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.13.2](service/personalizeruntime/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.16.3](service/pi/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.18.2](service/pinpoint/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.12.2](service/pinpointemail/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.11.2](service/pinpointsmsvoice/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.1.2](service/pinpointsmsvoicev2/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.1.2](service/pipes/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.25.1](service/polly/CHANGELOG.md#v1251-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.18.2](service/pricing/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.2.0](service/privatenetworks/CHANGELOG.md#v120-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: This release introduces a new StartNetworkResourceUpdate API, which enables return/replacement of hardware from a NetworkSite. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.20.1](service/proton/CHANGELOG.md#v1201-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.15.2](service/qldb/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.14.2](service/qldbsession/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.31.2](service/quicksight/CHANGELOG.md#v1312-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.17.3](service/ram/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.8.3](service/rbin/CHANGELOG.md#v183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.3](service/rds/CHANGELOG.md#v1403-2023-02-15) + * **Documentation**: Database Activity Stream support for RDS for SQL Server. +* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.13.2](service/rdsdata/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.18.2](service/redshiftdata/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.3](service/redshiftserverless/CHANGELOG.md#v143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.23.2](service/rekognition/CHANGELOG.md#v1232-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.8.2](service/resiliencehub/CHANGELOG.md#v182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.3](service/resourceexplorer2/CHANGELOG.md#v123-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.14.3](service/resourcegroups/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.14.3](service/resourcegroupstaggingapi/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.18.2](service/robomaker/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.1.2](service/rolesanywhere/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.14.2](service/route53domains/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.11.2](service/route53recoverycluster/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.11.2](service/route53recoverycontrolconfig/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.9.2](service/route53recoveryreadiness/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.16.3](service/route53resolver/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.9.2](service/rum/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.14.2](service/s3outposts/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.68.1](service/sagemaker/CHANGELOG.md#v1681-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.15.2](service/sagemakera2iruntime/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.13.2](service/sagemakeredge/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.13.2](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.1.2](service/sagemakergeospatial/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemakermetrics`: [v1.0.5](service/sagemakermetrics/CHANGELOG.md#v105-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.3](service/sagemakerruntime/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.12.2](service/savingsplans/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.1.2](service/scheduler/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.15.2](service/schemas/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.4](service/secretsmanager/CHANGELOG.md#v1184-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.2](service/securityhub/CHANGELOG.md#v1282-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.2.2](service/securitylake/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.12.2](service/serverlessapplicationrepository/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.2](service/servicecatalog/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.16.3](service/servicecatalogappregistry/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.19.2](service/servicediscovery/CHANGELOG.md#v1192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.14.3](service/servicequotas/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.16.2](service/sesv2/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.17.3](service/sfn/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.18.2](service/shield/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.14.2](service/signer/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.1.2](service/simspaceweaver/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.13.2](service/sms/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.18.1](service/snowball/CHANGELOG.md#v1181-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
+ * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.9.2](service/snowdevicemanagement/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.3](service/ssm/CHANGELOG.md#v1353-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.14.2](service/ssmcontacts/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.20.2](service/ssmincidents/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.2.2](service/ssmsap/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.12.2](service/sso/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.16.2](service/ssoadmin/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.14.2](service/ssooidc/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.18.3](service/storagegateway/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.2](service/support/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.2.2](service/supportapp/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.14.4](service/swf/CHANGELOG.md#v1144-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.17.3](service/synthetics/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.20.2](service/textract/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.15.2](service/timestreamquery/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.15.2](service/timestreamwrite/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.25.2](service/transcribe/CHANGELOG.md#v1252-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.9.2](service/transcribestreaming/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.3](service/transfer/CHANGELOG.md#v1283-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.17.2](service/translate/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.12.2](service/voiceid/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.12.2](service/waf/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.13.3](service/wafregional/CHANGELOG.md#v1133-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.25.0](service/wafv2/CHANGELOG.md#v1250-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: For protected CloudFront distributions, you can now use the AWS WAF Fraud Control account takeover prevention (ATP) managed rule group to block new login attempts from clients that have recently submitted too many failed login attempts. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.18.2](service/wellarchitected/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
+ * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.12.2](service/wisdom/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.3](service/workdocs/CHANGELOG.md#v1133-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.13.2](service/worklink/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.18.2](service/workmail/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.12.2](service/workmailmessageflow/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.3](service/workspaces/CHANGELOG.md#v1283-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.9.2](service/workspacesweb/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.16.3](service/xray/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
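All of the 2023-02-15 entries above roll up the same underlying change: restJson-based clients could previously map an error response to the wrong modeled error type, and PR #2012 corrects that mapping. For callers, the visible effect is on error matching with `errors.As`. Below is a minimal, hypothetical sketch (not part of this changelog) of the kind of inspection affected, using the Lambda client as an example of a restJson-based service; the function name is a placeholder.

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/lambda"
	"github.com/aws/aws-sdk-go-v2/service/lambda/types"
	"github.com/aws/smithy-go"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := lambda.NewFromConfig(cfg)

	// "my-function" is a placeholder; substitute a real function name.
	_, err = client.GetFunction(context.TODO(), &lambda.GetFunctionInput{
		FunctionName: aws.String("my-function"),
	})
	if err != nil {
		// Match the modeled error type; the 2023-02-15 fix is what keeps this
		// mapping correct for restJson-based services.
		var nfe *types.ResourceNotFoundException
		if errors.As(err, &nfe) {
			log.Printf("function not found: %s", aws.ToString(nfe.Message))
			return
		}
		// Fall back to the generic smithy API error.
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) {
			log.Printf("%s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
			return
		}
		log.Fatal(err)
	}
}
```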
+ +# Release (2023-02-14) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.17.0](service/appconfig/CHANGELOG.md#v1170-2023-02-14) + * **Feature**: AWS AppConfig now offers the option to set a version label on hosted configuration versions. Version labels allow you to identify specific hosted configuration versions based on an alternate versioning scheme that you define. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.22.0](service/datasync/CHANGELOG.md#v1220-2023-02-14) + * **Feature**: With this launch, we are giving customers the ability to use older SMB protocol versions, enabling them to use DataSync to copy data to and from their legacy storage arrays. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.86.0](service/ec2/CHANGELOG.md#v1860-2023-02-14) + * **Feature**: With this release customers can turn host maintenance on or off when allocating or modifying a supported dedicated host. Host maintenance is turned on by default for supported hosts. + +# Release (2023-02-13) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.9.0](service/account/CHANGELOG.md#v190-2023-02-13) + * **Feature**: This release of the Account Management API enables customers to view and manage whether AWS Opt-In Regions are enabled or disabled for their Account. For more information, see https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html +* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.6.0](service/appconfigdata/CHANGELOG.md#v160-2023-02-13) + * **Feature**: AWS AppConfig now offers the option to set a version label on hosted configuration versions. If a labeled hosted configuration version is deployed, its version label is available in the GetLatestConfiguration response. +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.18.0](service/snowball/CHANGELOG.md#v1180-2023-02-13) + * **Feature**: Adds support for EKS Anywhere on Snowball. AWS Snow Family customers can now install EKS Anywhere service on Snowball Edge Compute Optimized devices. + +# Release (2023-02-10) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.27.0](service/autoscaling/CHANGELOG.md#v1270-2023-02-10) + * **Feature**: You can now either terminate/replace, ignore, or wait for EC2 Auto Scaling instances on standby or protected from scale in. Also, you can also roll back changes from a failed instance refresh. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.46.0](service/connect/CHANGELOG.md#v1460-2023-02-10) + * **Feature**: This update provides the Wisdom session ARN for contacts enabled for Wisdom in the chat channel. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.85.0](service/ec2/CHANGELOG.md#v1850-2023-02-10) + * **Feature**: Adds support for waiters that automatically poll for an imported snapshot until it reaches the completed state. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.25.0](service/polly/CHANGELOG.md#v1250-2023-02-10) + * **Feature**: Amazon Polly adds two new neural Japanese voices - Kazuha, Tomoko +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.68.0](service/sagemaker/CHANGELOG.md#v1680-2023-02-10) + * **Feature**: Amazon SageMaker Autopilot adds support for selecting algorithms in CreateAutoMLJob API. 
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.2](service/sns/CHANGELOG.md#v1202-2023-02-10) + * **Documentation**: This release adds support for SNS X-Ray active tracing as well as other updates. + +# Release (2023-02-09) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.2](service/chimesdkmeetings/CHANGELOG.md#v1142-2023-02-09) + * **Documentation**: Documentation updates for Chime Meetings SDK +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.17.0](service/emrcontainers/CHANGELOG.md#v1170-2023-02-09) + * **Feature**: EMR on EKS allows configuring retry policies for job runs through the StartJobRun API. Using retry policies, a job can cause a driver pod to be restarted automatically if it fails or is deleted. The job's status can be seen in the DescribeJobRun and ListJobRun APIs and monitored using CloudWatch events. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.28.0](service/lexmodelsv2/CHANGELOG.md#v1280-2023-02-09) + * **Feature**: AWS Lex now supports Network of Bots. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.17.0](service/lexruntimev2/CHANGELOG.md#v1170-2023-02-09) + * **Feature**: AWS Lex now supports Network of Bots. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.2](service/lightsail/CHANGELOG.md#v1252-2023-02-09) + * **Documentation**: Documentation updates for Lightsail +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.9.0](service/migrationhubrefactorspaces/CHANGELOG.md#v190-2023-02-09) + * **Feature**: This release adds support for creating environments with a network fabric type of NONE +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.2](service/workdocs/CHANGELOG.md#v1132-2023-02-09) + * **Documentation**: Doc only update for the WorkDocs APIs. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.2](service/workspaces/CHANGELOG.md#v1282-2023-02-09) + * **Documentation**: Removed Windows Server 2016 BYOL and made changes based on IAM campaign. + +# Release (2023-02-08) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.20.0](service/backup/CHANGELOG.md#v1200-2023-02-08) + * **Feature**: This release added one attribute (resource name) in the output model of our 9 existing APIs in AWS backup so that customers will see the resource name at the output. No input required from Customers. +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.25.0](service/cloudfront/CHANGELOG.md#v1250-2023-02-08) + * **Feature**: CloudFront Origin Access Control extends support to AWS Elemental MediaStore origins. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.41.0](service/glue/CHANGELOG.md#v1410-2023-02-08) + * **Feature**: DirectJDBCSource + Glue 4.0 streaming options + +# Release (2023-02-07) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.2](service/transfer/CHANGELOG.md#v1282-2023-02-07) + * **Documentation**: Updated the documentation for the ImportCertificate API call, and added examples. + +# Release (2023-02-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.21.0](service/computeoptimizer/CHANGELOG.md#v1210-2023-02-06) + * **Feature**: AWS Compute optimizer can now infer if Kafka is running on an instance. 
+* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.23.0](service/customerprofiles/CHANGELOG.md#v1230-2023-02-06) + * **Feature**: This release deprecates the PartyType and Gender enum data types from the Profile model and replaces them with new PartyTypeString and GenderString attributes, which accept any string of length up to 255. +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.22.0](service/frauddetector/CHANGELOG.md#v1220-2023-02-06) + * **Feature**: My AWS Service (Amazon Fraud Detector) - This release introduces Cold Start Model Training which optimizes training for small datasets and adds intelligent methods for treating unlabeled data. You can now train Online Fraud Insights or Transaction Fraud Insights models with minimal historical-data. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.31.0](service/mediaconvert/CHANGELOG.md#v1310-2023-02-06) + * **Feature**: The AWS Elemental MediaConvert SDK has added improved scene change detection capabilities and a bandwidth reduction filter, along with video quality enhancements, to the AVC encoder. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.27.0](service/outposts/CHANGELOG.md#v1270-2023-02-06) + * **Feature**: Adds OrderType to Order structure. Adds PreviousOrderId and PreviousLineItemId to LineItem structure. Adds new line item status REPLACED. Increases maximum length of pagination token. + +# Release (2023-02-03) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.26.2](service/autoscaling/CHANGELOG.md#v1262-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.26.2](service/cloudformation/CHANGELOG.md#v1262-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.14.1](service/cloudsearch/CHANGELOG.md#v1141-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.2](service/cloudwatch/CHANGELOG.md#v1252-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.20.2](service/docdb/CHANGELOG.md#v1202-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.84.1](service/ec2/CHANGELOG.md#v1841-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.26.2](service/elasticache/CHANGELOG.md#v1262-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.15.1](service/elasticbeanstalk/CHANGELOG.md#v1151-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.15.2](service/elasticloadbalancing/CHANGELOG.md#v1152-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. 
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.3](service/elasticloadbalancingv2/CHANGELOG.md#v1193-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.2](service/iam/CHANGELOG.md#v1192-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.19.2](service/neptune/CHANGELOG.md#v1192-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.20.0](service/proton/CHANGELOG.md#v1200-2023-02-03) + * **Feature**: Add new GetResourcesSummary API +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.2](service/rds/CHANGELOG.md#v1402-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.2](service/redshift/CHANGELOG.md#v1272-2023-02-03) + * **Documentation**: Corrects descriptions of the parameters for the API operations RestoreFromClusterSnapshot, RestoreTableFromClusterSnapshot, and CreateCluster. + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.15.1](service/ses/CHANGELOG.md#v1151-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.1](service/sns/CHANGELOG.md#v1201-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.20.2](service/sqs/CHANGELOG.md#v1202-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.3](service/sts/CHANGELOG.md#v1183-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. + +# Release (2023-02-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.16.0](service/appconfig/CHANGELOG.md#v1160-2023-02-02) + * **Feature**: AWS AppConfig introduces KMS customer-managed key (CMK) encryption of configuration data, along with AWS Secrets Manager as a new configuration data source. S3 objects using SSE-KMS encryption and SSM Parameter Store SecureStrings are also now supported. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.84.0](service/ec2/CHANGELOG.md#v1840-2023-02-02) + * **Feature**: Documentation updates for EC2. 
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.2](service/elasticloadbalancingv2/CHANGELOG.md#v1192-2023-02-02) + * **Documentation**: The GWLB Flex Health Check project updates the default values of healthy-threshold-count from 3 to 5 and unhealthy-threshold-count from 3 to 2 +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.31.0](service/quicksight/CHANGELOG.md#v1310-2023-02-02) + * **Feature**: QuickSight support for Radar Chart and Dashboard Publish Options + +# Release (2023-02-01) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.22.0](service/devopsguru/CHANGELOG.md#v1220-2023-02-01) + * **Feature**: This release adds filter support for the ListAnomalyForInsight API. +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.25.0](service/forecast/CHANGELOG.md#v1250-2023-02-01) + * **Feature**: This release will enable customers to select INCREMENTAL as ImportModel in Forecast's CreateDatasetImportJob API. Verified latest SDK containing required attribute, following https://w.amazon.com/bin/view/AWS-Seer/Launch/Trebuchet/ +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.1](service/iam/CHANGELOG.md#v1191-2023-02-01) + * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.22.0](service/mediatailor/CHANGELOG.md#v1220-2023-02-01) + * **Feature**: The AWS Elemental MediaTailor SDK for Channel Assembly has added support for program updates, and the ability to clip the end of VOD sources in programs. +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.0](service/sns/CHANGELOG.md#v1200-2023-02-01) + * **Feature**: Additional attributes added for set-topic-attributes. + +# Release (2023-01-31) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.19.0](service/appsync/CHANGELOG.md#v1190-2023-01-31) + * **Feature**: This release introduces the feature to support EventBridge as AppSync data source. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.23.0](service/cloudtrail/CHANGELOG.md#v1230-2023-01-31) + * **Feature**: Add new "Channel" APIs to enable users to manage channels used for CloudTrail Lake integrations, and "Resource Policy" APIs to enable users to manage the resource-based permissions policy attached to a channel. +* `github.com/aws/aws-sdk-go-v2/service/cloudtraildata`: [v1.0.0](service/cloudtraildata/CHANGELOG.md#v100-2023-01-31) + * **Release**: New AWS service client module + * **Feature**: Add CloudTrail Data Service to enable users to ingest activity events from non-AWS sources into CloudTrail Lake. +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.16.0](service/codeartifact/CHANGELOG.md#v1160-2023-01-31) + * **Feature**: This release introduces a new DeletePackage API, which enables deletion of a package and all of its versions from a repository. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.83.0](service/ec2/CHANGELOG.md#v1830-2023-01-31) + * **Feature**: This launch allows customers to associate up to 8 IP addresses to their NAT Gateways to increase the limit on concurrent connections to a single destination by eight times from 55K to 440K. +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.17.0](service/groundstation/CHANGELOG.md#v1170-2023-01-31) + * **Feature**: DigIF Expansion changes to the Customer APIs. 
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.34.0](service/iot/CHANGELOG.md#v1340-2023-01-31) + * **Feature**: Added support for IoT Rules Engine Cloudwatch Logs action batch mode. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.14.0](service/opensearch/CHANGELOG.md#v1140-2023-01-31) + * **Feature**: Amazon OpenSearch Service adds the option for a VPC endpoint connection between two domains when the local domain uses OpenSearch version 1.3 or 2.3. You can now use remote reindex to copy indices from one VPC domain to another without a reverse proxy. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.24.0](service/polly/CHANGELOG.md#v1240-2023-01-31) + * **Feature**: Amazon Polly adds two new neural American English voices - Ruth, Stephen +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.67.0](service/sagemaker/CHANGELOG.md#v1670-2023-01-31) + * **Feature**: Amazon SageMaker Automatic Model Tuning now supports more completion criteria for Hyperparameter Optimization. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.0](service/securityhub/CHANGELOG.md#v1280-2023-01-31) + * **Feature**: New fields have been added to the AWS Security Finding Format. Compliance.SecurityControlId is a unique identifier for a security control across standards. Compliance.AssociatedStandards contains all enabled standards in which a security control is enabled. + +# Release (2023-01-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.26.0](service/cloudformation/CHANGELOG.md#v1260-2023-01-30) + * **Feature**: This feature provides a method of obtaining which regions a stackset has stack instances deployed in. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.82.0](service/ec2/CHANGELOG.md#v1820-2023-01-30) + * **Feature**: We add Prefix Lists as a new route destination option for LocalGatewayRoutes. This will allow customers to create routes to Prefix Lists. Prefix List routes will allow customers to group individual CIDR routes with the same target into a single route. + +# Release (2023-01-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.20.0](service/appstream/CHANGELOG.md#v1200-2023-01-27) + * **Feature**: Fixing the issue where Appstream waiters hang for fleet_started and fleet_stopped. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.21.0](service/mediatailor/CHANGELOG.md#v1210-2023-01-27) + * **Feature**: This release introduces the As Run logging type, along with API and documentation updates. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.26.0](service/outposts/CHANGELOG.md#v1260-2023-01-27) + * **Feature**: Adding support for payment term in GetOrder, CreateOrder responses. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.66.0](service/sagemaker/CHANGELOG.md#v1660-2023-01-27) + * **Feature**: This release supports running SageMaker Training jobs with container images that are in a private Docker registry. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.0](service/sagemakerruntime/CHANGELOG.md#v1180-2023-01-27) + * **Feature**: Amazon SageMaker Runtime which supports InvokeEndpointAsync asynchronously can now invoke endpoints with custom timeout values. Asynchronous invocations support longer processing times. 
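The Polly entry in the 2023-01-31 release above adds the neural voices Ruth and Stephen. As a hypothetical illustration (not part of this changelog), a request for one of the new voices might look like the sketch below; the voice ID is passed as a plain string in case the generated constant name differs, and the output file name is arbitrary.

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/polly"
	"github.com/aws/aws-sdk-go-v2/service/polly/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := polly.NewFromConfig(cfg)

	// "Ruth" is one of the voices named in the 2023-01-31 Polly entry above.
	out, err := client.SynthesizeSpeech(context.TODO(), &polly.SynthesizeSpeechInput{
		Engine:       types.EngineNeural,
		OutputFormat: types.OutputFormatMp3,
		VoiceId:      types.VoiceId("Ruth"),
		Text:         aws.String("Hello from the neural Ruth voice."),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.AudioStream.Close()

	// Write the synthesized audio to a local file.
	f, err := os.Create("ruth.mp3")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, out.AudioStream); err != nil {
		log.Fatal(err)
	}
}
```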
+ +# Release (2023-01-26) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.18.0](service/eventbridge/CHANGELOG.md#v1180-2023-01-26) + * **Feature**: Minor comments for Redshift Serverless workgroup target support. + +# Release (2023-01-25) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.81.0](service/ec2/CHANGELOG.md#v1810-2023-01-25) + * **Feature**: This release adds new functionality that allows customers to provision IPv6 CIDR blocks through Amazon VPC IP Address Manager (IPAM) as well as allowing customers to utilize IPAM Resource Discovery APIs. +* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.4.0](service/m2/CHANGELOG.md#v140-2023-01-25) + * **Feature**: Add returnCode, batchJobIdentifier in GetBatchJobExecution response, for user to view the batch job execution result & unique identifier from engine. Also removed unused headers from REST APIs +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.23.0](service/polly/CHANGELOG.md#v1230-2023-01-25) + * **Feature**: Add 5 new neural voices - Sergio (es-ES), Andres (es-MX), Remi (fr-FR), Adriano (it-IT) and Thiago (pt-BR). +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.1](service/redshiftserverless/CHANGELOG.md#v141-2023-01-25) + * **Documentation**: Added query monitoring rules as possible parameters for create and update workgroup operations. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.65.0](service/sagemaker/CHANGELOG.md#v1650-2023-01-25) + * **Feature**: SageMaker Inference Recommender now decouples from Model Registry and could accept Model Name to invoke inference recommendations job; Inference Recommender now provides CPU/Memory Utilization metrics data in recommendation output. +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.2](service/sts/CHANGELOG.md#v1182-2023-01-25) + * **Documentation**: Doc only change to update wording in a key topic + +# Release (2023-01-24) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.27.0](service/route53/CHANGELOG.md#v1270-2023-01-24) + * **Feature**: Amazon Route 53 now supports the Asia Pacific (Melbourne) Region (ap-southeast-4) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.2.0](service/ssmsap/CHANGELOG.md#v120-2023-01-24) + * **Feature**: This release provides updates to documentation and support for listing operations performed by AWS Systems Manager for SAP. + +# Release (2023-01-23) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.29.0](service/lambda/CHANGELOG.md#v1290-2023-01-23) + * **Feature**: Release Lambda RuntimeManagementConfig, enabling customers to better manage runtime updates to their Lambda functions. This release adds two new APIs, GetRuntimeManagementConfig and PutRuntimeManagementConfig, as well as support on existing Create/Get/Update function APIs. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.64.0](service/sagemaker/CHANGELOG.md#v1640-2023-01-23) + * **Feature**: Amazon SageMaker Inference now supports P4de instance types. 
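The Lambda entry in the 2023-01-23 release above names the new GetRuntimeManagementConfig and PutRuntimeManagementConfig operations. A minimal sketch (not part of this changelog) of reading a function's runtime management configuration with the Go client follows; the function name is a placeholder, and the input is assumed to take only the function name.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/lambda"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := lambda.NewFromConfig(cfg)

	// "my-function" is a placeholder function name.
	out, err := client.GetRuntimeManagementConfig(context.TODO(), &lambda.GetRuntimeManagementConfigInput{
		FunctionName: aws.String("my-function"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Print the whole response rather than assuming individual field names.
	log.Printf("runtime management config: %+v", out)
}
```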
+ +# Release (2023-01-20) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.80.0](service/ec2/CHANGELOG.md#v1800-2023-01-20) + * **Feature**: C6in, M6in, M6idn, R6in and R6idn instances are powered by 3rd Generation Intel Xeon Scalable processors (code named Ice Lake) with an all-core turbo frequency of 3.5 GHz. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.0](service/ivs/CHANGELOG.md#v1200-2023-01-20) + * **Feature**: API and Doc update. Update to arns field in BatchGetStreamKey. Also updates to operations and structures. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.30.0](service/quicksight/CHANGELOG.md#v1300-2023-01-20) + * **Feature**: This release adds support for data bars in QuickSight table and increases pivot table field well limit. + +# Release (2023-01-19) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.24.0](service/appflow/CHANGELOG.md#v1240-2023-01-19) + * **Feature**: Adding support for Salesforce Pardot connector in Amazon AppFlow. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.20.0](service/cloudwatchlogs/CHANGELOG.md#v1200-2023-01-19) + * **Feature**: Bug fix - Removed the regex pattern validation from CoralModel to avoid potential security issue. +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.15.0](service/codeartifact/CHANGELOG.md#v1150-2023-01-19) + * **Feature**: Documentation updates for CodeArtifact +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.45.0](service/connect/CHANGELOG.md#v1450-2023-01-19) + * **Feature**: Amazon Connect Chat introduces Persistent Chat, allowing customers to resume previous conversations with context and transcripts carried over from previous chats, eliminating the need to repeat themselves and allowing agents to provide personalized service with access to entire conversation history. +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.0](service/connectparticipant/CHANGELOG.md#v1150-2023-01-19) + * **Feature**: This release updates Amazon Connect Participant's GetTranscript api to provide transcripts of past chats on a persistent chat session. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.79.0](service/ec2/CHANGELOG.md#v1790-2023-01-19) + * **Feature**: Adds SSM Parameter Resource Aliasing support to EC2 Launch Templates. Launch Templates can now store parameter aliases in place of AMI Resource IDs. CreateLaunchTemplateVersion and DescribeLaunchTemplateVersions now support a convenience flag, ResolveAlias, to return the resolved parameter value. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.40.0](service/glue/CHANGELOG.md#v1400-2023-01-19) + * **Feature**: Release Glue Studio Hudi Data Lake Format for SDK/CLI +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.16.0](service/groundstation/CHANGELOG.md#v1160-2023-01-19) + * **Feature**: Add configurable prepass and postpass times for DataflowEndpointGroup. Add Waiter to allow customers to wait for a contact that was reserved through ReserveContact +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.29.0](service/medialive/CHANGELOG.md#v1290-2023-01-19) + * **Feature**: AWS Elemental MediaLive adds support for SCTE 35 preRollMilliSeconds. 
+* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.13.0](service/opensearch/CHANGELOG.md#v1130-2023-01-19) + * **Feature**: This release adds the enhanced dry run option, that checks for validation errors that might occur when deploying configuration changes and provides a summary of these errors, if any. The feature will also indicate whether a blue/green deployment will be required to apply a change. +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.11.0](service/panorama/CHANGELOG.md#v1110-2023-01-19) + * **Feature**: Added AllowMajorVersionUpdate option to OTAJobConfig to make appliance software major version updates opt-in. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.63.0](service/sagemaker/CHANGELOG.md#v1630-2023-01-19) + * **Feature**: HyperParameterTuningJobs now allow passing environment variables into the corresponding TrainingJobs + +# Release (2023-01-18) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.0](service/cloudwatch/CHANGELOG.md#v1250-2023-01-18) + * **Feature**: Enable cross-account streams in CloudWatch Metric Streams via Observability Access Manager. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.1](service/efs/CHANGELOG.md#v1191-2023-01-18) + * **Documentation**: Documentation updates for EFS access points limit increase +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.24.2](service/wafv2/CHANGELOG.md#v1242-2023-01-18) + * **Documentation**: Improved the visibility of the guidance for updating AWS WAF resources, such as web ACLs and rule groups. + +# Release (2023-01-17) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.5.0](service/billingconductor/CHANGELOG.md#v150-2023-01-17) + * **Feature**: This release adds support for SKU Scope for pricing plans. +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.22.0](service/imagebuilder/CHANGELOG.md#v1220-2023-01-17) + * **Feature**: Add support for AWS Marketplace product IDs as input during CreateImageRecipe for the parent-image parameter. Add support for listing third-party components. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.24.0](service/networkfirewall/CHANGELOG.md#v1240-2023-01-17) + * **Feature**: Network Firewall now allows creation of dual stack endpoints, enabling inspection of IPv6 traffic. + +# Release (2023-01-13) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.44.0](service/connect/CHANGELOG.md#v1440-2023-01-13) + * **Feature**: This release updates the responses of UpdateContactFlowContent, UpdateContactFlowMetadata, UpdateContactFlowName and DeleteContactFlow API with empty responses. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.78.0](service/ec2/CHANGELOG.md#v1780-2023-01-13) + * **Feature**: Documentation updates for EC2. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.25.0](service/outposts/CHANGELOG.md#v1250-2023-01-13) + * **Feature**: This release adds POWER_30_KVA as an option for PowerDrawKva. PowerDrawKva is part of the RackPhysicalProperties structure in the CreateSite request. +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.14.0](service/resourcegroups/CHANGELOG.md#v1140-2023-01-13) + * **Feature**: AWS Resource Groups customers can now turn on Group Lifecycle Events in their AWS account. When you turn this on, Resource Groups monitors your groups for changes to group state or membership. Those changes are sent to Amazon EventBridge as events that you can respond to using rules you create. 
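The EC2 entry in the 2023-01-19 release above names a new ResolveAlias convenience flag for CreateLaunchTemplateVersion and DescribeLaunchTemplateVersions. The hypothetical sketch below (not part of this changelog) assumes the flag surfaces as a ResolveAlias boolean on DescribeLaunchTemplateVersionsInput; the launch template ID is a placeholder.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	// "lt-0123456789abcdef0" is a placeholder launch template ID.
	out, err := client.DescribeLaunchTemplateVersions(context.TODO(), &ec2.DescribeLaunchTemplateVersionsInput{
		LaunchTemplateId: aws.String("lt-0123456789abcdef0"),
		Versions:         []string{"$Latest"},
		// ResolveAlias asks EC2 to return the resolved AMI ID when the
		// template stores an SSM parameter alias in place of an AMI ID
		// (field name assumed from the changelog entry).
		ResolveAlias: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range out.LaunchTemplateVersions {
		if v.LaunchTemplateData == nil {
			continue
		}
		log.Printf("version %d image: %s", aws.ToInt64(v.VersionNumber), aws.ToString(v.LaunchTemplateData.ImageId))
	}
}
```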
+ +# Release (2023-01-12) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.0.0](service/cleanrooms/CHANGELOG.md#v100-2023-01-12) + * **Release**: New AWS service client module + * **Feature**: Initial release of AWS Clean Rooms +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.19.0](service/cloudwatchlogs/CHANGELOG.md#v1190-2023-01-12) + * **Feature**: Bug fix: logGroupName is now not a required field in GetLogEvents, FilterLogEvents, GetLogGroupFields, and DescribeLogStreams APIs as logGroupIdentifier can be provided instead +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.28.0](service/lambda/CHANGELOG.md#v1280-2023-01-12) + * **Feature**: Add support for MaximumConcurrency parameter for SQS event source. Customers can now limit the maximum concurrent invocations for their SQS Event Source Mapping. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.30.0](service/mediaconvert/CHANGELOG.md#v1300-2023-01-12) + * **Feature**: The AWS Elemental MediaConvert SDK has added support for compact DASH manifest generation, audio normalization using TruePeak measurements, and the ability to clip the sample range in the color corrector. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.1](service/secretsmanager/CHANGELOG.md#v1181-2023-01-12) + * **Documentation**: Update documentation for new ListSecrets and DescribeSecret parameters + +# Release (2023-01-11) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.38.0](service/kendra/CHANGELOG.md#v1380-2023-01-11) + * **Feature**: This release adds support to new document types - RTF, XML, XSLT, MS_EXCEL, CSV, JSON, MD + +# Release (2023-01-10) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.21.0](service/location/CHANGELOG.md#v1210-2023-01-10) + * **Feature**: This release adds support for two new route travel models, Bicycle and Motorcycle which can be used with Grab data source. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.0](service/rds/CHANGELOG.md#v1400-2023-01-10) + * **Feature**: This release adds support for configuring allocated storage on the CreateDBInstanceReadReplica, RestoreDBInstanceFromDBSnapshot, and RestoreDBInstanceToPointInTime APIs. + +# Release (2023-01-09) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.15.0](service/ecrpublic/CHANGELOG.md#v1150-2023-01-09) + * **Feature**: This release for Amazon ECR Public makes several change to bring the SDK into sync with the API. +* `github.com/aws/aws-sdk-go-v2/service/kendraranking`: [v1.0.0](service/kendraranking/CHANGELOG.md#v100-2023-01-09) + * **Release**: New AWS service client module + * **Feature**: Introducing Amazon Kendra Intelligent Ranking, a new set of Kendra APIs that leverages Kendra semantic ranking capabilities to improve the quality of search results from other search services (i.e. OpenSearch, ElasticSearch, Solr). +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.23.0](service/networkfirewall/CHANGELOG.md#v1230-2023-01-09) + * **Feature**: Network Firewall now supports the Suricata rule action reject, in addition to the actions pass, drop, and alert. +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.9.0](service/workspacesweb/CHANGELOG.md#v190-2023-01-09) + * **Feature**: This release adds support for a new portal authentication type: AWS IAM Identity Center (successor to AWS Single Sign-On). 
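The CloudWatch Logs entry in the 2023-01-12 release above makes logGroupName optional when logGroupIdentifier is supplied. The hypothetical sketch below (not part of this changelog) assumes the new member surfaces as LogGroupIdentifier on FilterLogEventsInput; the log group ARN and filter pattern are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := cloudwatchlogs.NewFromConfig(cfg)

	// Placeholder log group ARN; a log group name also works here.
	out, err := client.FilterLogEvents(context.TODO(), &cloudwatchlogs.FilterLogEventsInput{
		// LogGroupIdentifier stands in for the previously required LogGroupName
		// (field name assumed from the changelog entry).
		LogGroupIdentifier: aws.String("arn:aws:logs:us-east-1:123456789012:log-group:/my/app"),
		FilterPattern:      aws.String("ERROR"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range out.Events {
		log.Println(aws.ToString(e.Message))
	}
}
```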
+ +# Release (2023-01-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.21.0](service/acmpca/CHANGELOG.md#v1210-2023-01-06) + * **Feature**: Added revocation parameter validation: bucket names must match S3 bucket naming rules and CNAMEs conform to RFC2396 restrictions on the use of special characters in URIs. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.23.0](service/auditmanager/CHANGELOG.md#v1230-2023-01-06) + * **Feature**: This release introduces a new data retention option in your Audit Manager settings. You can now use the DeregistrationPolicy parameter to specify if you want to delete your data when you deregister Audit Manager. + +# Release (2023-01-05) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.19.0](service/accessanalyzer/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.8.0](service/account/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.17.0](service/acm/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.20.0](service/acmpca/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.15.0](service/alexaforbusiness/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.16.0](service/amp/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.13.0](service/amplify/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.14.0](service/amplifybackend/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Feature**: Updated GetBackendAPIModels response to include ModelIntrospectionSchema json string +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.9.0](service/amplifyuibuilder/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.16.0](service/apigateway/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.11.0](service/apigatewaymanagementapi/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.13.0](service/apigatewayv2/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.15.0](service/appconfig/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.5.0](service/appconfigdata/CHANGELOG.md#v150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.23.0](service/appflow/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.14.0](service/appintegrations/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.17.0](service/applicationautoscaling/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.10.0](service/applicationcostprofiler/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.15.0](service/applicationdiscoveryservice/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.17.0](service/applicationinsights/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.17.0](service/appmesh/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.16.0](service/apprunner/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Feature**: This release adds support of securely referencing secrets and configuration data that are stored in Secrets Manager and SSM Parameter Store by adding them as environment secrets in your App Runner service. +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.19.0](service/appstream/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.18.0](service/appsync/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.1.0](service/arczonalshift/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.22.0](service/athena/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.22.0](service/auditmanager/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.26.0](service/autoscaling/CHANGELOG.md#v1260-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.13.0](service/autoscalingplans/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.19.0](service/backup/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.9.0](service/backupgateway/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.1.0](service/backupstorage/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.21.0](service/batch/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.4.0](service/billingconductor/CHANGELOG.md#v140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.17.0](service/braket/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.14.0](service/budgets/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.22.0](service/chime/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.10.0](service/chimesdkidentity/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.2.0](service/chimesdkmediapipelines/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.0](service/chimesdkmeetings/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.12.0](service/chimesdkmessaging/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.1.0](service/chimesdkvoice/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.17.0](service/cloud9/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.11.0](service/cloudcontrol/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.13.0](service/clouddirectory/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.25.0](service/cloudformation/CHANGELOG.md#v1250-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.24.0](service/cloudfront/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.13.0](service/cloudhsm/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.14.0](service/cloudhsmv2/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.14.0](service/cloudsearch/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.12.0](service/cloudsearchdomain/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.22.0](service/cloudtrail/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.24.0](service/cloudwatch/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.15.0](service/cloudwatchevents/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.18.0](service/cloudwatchlogs/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.14.0](service/codeartifact/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.20.0](service/codebuild/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.1.0](service/codecatalyst/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.14.0](service/codecommit/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.16.0](service/codedeploy/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.13.0](service/codeguruprofiler/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.17.0](service/codegurureviewer/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.14.0](service/codepipeline/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.13.0](service/codestar/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.14.0](service/codestarconnections/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.14.0](service/codestarnotifications/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.15.0](service/cognitoidentity/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.22.0](service/cognitoidentityprovider/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.12.0](service/cognitosync/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.21.0](service/comprehend/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.15.0](service/comprehendmedical/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.20.0](service/computeoptimizer/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.29.0](service/configservice/CHANGELOG.md#v1290-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.43.0](service/connect/CHANGELOG.md#v1430-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Feature**: Documentation update for a new Initiation Method value in DescribeContact API +* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.2.0](service/connectcampaigns/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.2.0](service/connectcases/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.13.0](service/connectcontactlens/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.14.0](service/connectparticipant/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.1.0](service/controltower/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.15.0](service/costandusagereportservice/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.25.0](service/costexplorer/CHANGELOG.md#v1250-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.22.0](service/customerprofiles/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.23.0](service/databasemigrationservice/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.21.0](service/databrew/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.18.0](service/dataexchange/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.14.0](service/datapipeline/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.21.0](service/datasync/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.12.0](service/dax/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.18.0](service/detective/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.15.0](service/devicefarm/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.21.0](service/devopsguru/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.0](service/directconnect/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.16.0](service/directoryservice/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.14.0](service/dlm/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.20.0](service/docdb/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.1.0](service/docdbelastic/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.10.0](service/drs/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.0](service/dynamodb/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.14.0](service/dynamodbstreams/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.16.0](service/ebs/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.15.0](service/ec2instanceconnect/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.18.0](service/ecr/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.14.0](service/ecrpublic/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.23.0](service/ecs/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.0](service/efs/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.27.0](service/eks/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.26.0](service/elasticache/CHANGELOG.md#v1260-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.15.0](service/elasticbeanstalk/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.12.0](service/elasticinference/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.15.0](service/elasticloadbalancing/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.0](service/elasticloadbalancingv2/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.18.0](service/elasticsearchservice/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.14.0](service/elastictranscoder/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.22.0](service/emr/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.16.0](service/emrcontainers/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.5.0](service/emrserverless/CHANGELOG.md#v150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Feature**: Adds support for customized images. You can now provide runtime images when creating or updating EMR Serverless Applications. +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.17.0](service/eventbridge/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.10.0](service/evidently/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.9.0](service/finspace/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.14.0](service/finspacedata/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.16.0](service/firehose/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.14.0](service/fis/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.22.0](service/fms/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.24.0](service/forecast/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.13.0](service/forecastquery/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.21.0](service/frauddetector/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.0](service/fsx/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.17.0](service/gamelift/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.2.0](service/gamesparks/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.14.0](service/glacier/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.16.0](service/globalaccelerator/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.39.0](service/glue/CHANGELOG.md#v1390-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.11.0](service/grafana/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.15.0](service/greengrass/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.21.0](service/greengrassv2/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.15.0](service/groundstation/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.0](service/guardduty/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.16.0](service/health/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.15.0](service/healthlake/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.13.0](service/honeycode/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.0](service/iam/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.0](service/identitystore/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.21.0](service/imagebuilder/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.13.0](service/inspector/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.11.0](service/inspector2/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.33.0](service/iot/CHANGELOG.md#v1330-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.11.0](service/iot1clickdevicesservice/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.12.0](service/iot1clickprojects/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.14.0](service/iotanalytics/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.14.0](service/iotdataplane/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.17.0](service/iotdeviceadvisor/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.15.0](service/iotevents/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.13.0](service/ioteventsdata/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.13.0](service/iotfleethub/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.3.0](service/iotfleetwise/CHANGELOG.md#v130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.12.0](service/iotjobsdataplane/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.1.0](service/iotroborunner/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.15.0](service/iotsecuretunneling/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.27.0](service/iotsitewise/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.14.0](service/iotthingsgraph/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.10.0](service/iottwinmaker/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.24.0](service/iotwireless/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.19.0](service/ivs/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.3.0](service/ivschat/CHANGELOG.md#v130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.19.0](service/kafka/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.9.0](service/kafkaconnect/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.37.0](service/kendra/CHANGELOG.md#v1370-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.1.0](service/keyspaces/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.17.0](service/kinesis/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.14.0](service/kinesisanalytics/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.16.0](service/kinesisanalyticsv2/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.15.0](service/kinesisvideo/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.14.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.11.0](service/kinesisvideomedia/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.11.0](service/kinesisvideosignaling/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.2.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.0](service/kms/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.19.0](service/lakeformation/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.27.0](service/lambda/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.17.0](service/lexmodelbuildingservice/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.27.0](service/lexmodelsv2/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.13.0](service/lexruntimeservice/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.16.0](service/lexruntimev2/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.17.0](service/licensemanager/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.1.0](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.2.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.0](service/lightsail/CHANGELOG.md#v1250-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Documentation**: Documentation updates for Amazon Lightsail. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.20.0](service/location/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.17.0](service/lookoutequipment/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.19.0](service/lookoutmetrics/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.15.0](service/lookoutvision/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.3.0](service/m2/CHANGELOG.md#v130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.15.0](service/machinelearning/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.15.0](service/macie/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.26.0](service/macie2/CHANGELOG.md#v1260-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.14.0](service/managedblockchain/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.15.0](service/marketplacecatalog/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.12.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.12.0](service/marketplaceentitlementservice/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.14.0](service/marketplacemetering/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.18.0](service/mediaconnect/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.29.0](service/mediaconvert/CHANGELOG.md#v1290-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.28.0](service/medialive/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.20.0](service/mediapackage/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.21.0](service/mediapackagevod/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.13.0](service/mediastore/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.13.0](service/mediastoredata/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.20.0](service/mediatailor/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.12.0](service/memorydb/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.17.0](service/mgn/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.13.0](service/migrationhub/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.13.0](service/migrationhubconfig/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.1.0](service/migrationhuborchestrator/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.8.0](service/migrationhubrefactorspaces/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.7.0](service/migrationhubstrategy/CHANGELOG.md#v170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.12.0](service/mobile/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.14.0](service/mq/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.14.0](service/mturk/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.14.0](service/mwaa/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Documentation**: MWAA supports Apache Airflow version 2.4.3. +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.19.0](service/neptune/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.22.0](service/networkfirewall/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.0](service/networkmanager/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.16.0](service/nimble/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.1.0](service/oam/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.1.0](service/omics/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.12.0](service/opensearch/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.1.0](service/opensearchserverless/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.14.0](service/opsworks/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.15.0](service/opsworkscm/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.18.0](service/organizations/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.24.0](service/outposts/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.10.0](service/panorama/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.23.0](service/personalize/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.13.0](service/personalizeevents/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.13.0](service/personalizeruntime/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.16.0](service/pi/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.18.0](service/pinpoint/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.12.0](service/pinpointemail/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.11.0](service/pinpointsmsvoice/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.1.0](service/pinpointsmsvoicev2/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.1.0](service/pipes/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.22.0](service/polly/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.18.0](service/pricing/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.1.0](service/privatenetworks/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.19.0](service/proton/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.15.0](service/qldb/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.14.0](service/qldbsession/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.29.0](service/quicksight/CHANGELOG.md#v1290-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.17.0](service/ram/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.8.0](service/rbin/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.39.0](service/rds/CHANGELOG.md#v1390-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Feature**: This release adds support for specifying which certificate authority (CA) to use for a DB instance's server certificate during DB instance creation, as well as other CA enhancements. +* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.13.0](service/rdsdata/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.0](service/redshift/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.18.0](service/redshiftdata/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.0](service/redshiftserverless/CHANGELOG.md#v140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.23.0](service/rekognition/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.8.0](service/resiliencehub/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.0](service/resourceexplorer2/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.13.0](service/resourcegroups/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.14.0](service/resourcegroupstaggingapi/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.18.0](service/robomaker/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.1.0](service/rolesanywhere/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.26.0](service/route53/CHANGELOG.md#v1260-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.14.0](service/route53domains/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.11.0](service/route53recoverycluster/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.11.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.9.0](service/route53recoveryreadiness/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.16.0](service/route53resolver/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.9.0](service/rum/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.30.0](service/s3/CHANGELOG.md#v1300-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.29.0](service/s3control/CHANGELOG.md#v1290-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.14.0](service/s3outposts/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.62.0](service/sagemaker/CHANGELOG.md#v1620-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.15.0](service/sagemakera2iruntime/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.13.0](service/sagemakeredge/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.13.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.1.0](service/sagemakergeospatial/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.17.0](service/sagemakerruntime/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.12.0](service/savingsplans/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.1.0](service/scheduler/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.15.0](service/schemas/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.0](service/secretsmanager/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.27.0](service/securityhub/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.2.0](service/securitylake/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.12.0](service/serverlessapplicationrepository/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.0](service/servicecatalog/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.16.0](service/servicecatalogappregistry/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.19.0](service/servicediscovery/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.14.0](service/servicequotas/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.15.0](service/ses/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.16.0](service/sesv2/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.17.0](service/sfn/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.18.0](service/shield/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.14.0](service/signer/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.1.0](service/simspaceweaver/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.13.0](service/sms/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.17.0](service/snowball/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.9.0](service/snowdevicemanagement/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.19.0](service/sns/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.20.0](service/sqs/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.0](service/ssm/CHANGELOG.md#v1350-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.14.0](service/ssmcontacts/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.20.0](service/ssmincidents/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.1.0](service/ssmsap/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.12.0](service/sso/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.16.0](service/ssoadmin/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.14.0](service/ssooidc/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.18.0](service/storagegateway/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.0](service/sts/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.0](service/support/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.2.0](service/supportapp/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.14.0](service/swf/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.17.0](service/synthetics/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.20.0](service/textract/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.15.0](service/timestreamquery/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.15.0](service/timestreamwrite/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.25.0](service/transcribe/CHANGELOG.md#v1250-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.9.0](service/transcribestreaming/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.0](service/transfer/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.17.0](service/translate/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.12.0](service/voiceid/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.12.0](service/waf/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.13.0](service/wafregional/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.24.0](service/wafv2/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.18.0](service/wellarchitected/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.12.0](service/wisdom/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.0](service/workdocs/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.13.0](service/worklink/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.18.0](service/workmail/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.12.0](service/workmailmessageflow/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.0](service/workspaces/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.8.0](service/workspacesweb/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.16.0](service/xray/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# Release (2023-01-04) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.16.0](service/applicationautoscaling/CHANGELOG.md#v1160-2023-01-04) + * **Feature**: Customers can now use the existing DescribeScalingActivities API to also see the detailed and machine-readable reasons for Application Auto Scaling not scaling their resources and, if needed, take the necessary corrective actions. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.17.4](service/cloudwatchlogs/CHANGELOG.md#v1174-2023-01-04) + * **Documentation**: Update to remove sequenceToken as a required field in PutLogEvents calls. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.34.0](service/ssm/CHANGELOG.md#v1340-2023-01-04) + * **Feature**: Adding support for QuickSetup Document Type in Systems Manager + +# Release (2023-01-03) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.1.0](service/securitylake/CHANGELOG.md#v110-2023-01-03) + * **Feature**: Allow CreateSubscriber API to take string input that allows setting more descriptive SubscriberDescription field. Make souceTypes field required in model level for UpdateSubscriberRequest as it is required for every API call on the backend. Allow ListSubscribers take any String as nextToken param. + +# Release (2022-12-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.23.0](service/cloudfront/CHANGELOG.md#v1230-2022-12-30) + * **Feature**: Extend response headers policy to support removing headers from viewer responses +* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.2.1](service/iotfleetwise/CHANGELOG.md#v121-2022-12-30) + * **Documentation**: Update documentation - correct the epoch constant value of default value for expiryTime field in CreateCampaign request. 
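The `ErrorCodeOverride` entries above all stem from the same smithy-go change (aws/smithy-go#401): every generated service error struct gains an optional code override that feeds its `ErrorCode()` method. As an illustrative sketch only, not part of this patch, the following shows how calling code typically reads those codes through the generic `smithy.APIError` interface; the bucket and key names are placeholders.

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	smithy "github.com/aws/smithy-go"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	client := s3.NewFromConfig(cfg)
	_, err = client.HeadObject(context.TODO(), &s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder values for illustration
		Key:    aws.String("missing-key"),
	})
	if err != nil {
		// Generated error structs implement smithy.APIError; when
		// ErrorCodeOverride is populated it is what ErrorCode() reports.
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) {
			log.Printf("code=%s message=%s", apiErr.ErrorCode(), apiErr.ErrorMessage())
		}
	}
}
```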
+ +# Release (2022-12-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.28](service/apigateway/CHANGELOG.md#v11528-2022-12-29) + * **Documentation**: Documentation updates for Amazon API Gateway +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.21.0](service/emr/CHANGELOG.md#v1210-2022-12-29) + * **Feature**: Added GetClusterSessionCredentials API to allow Amazon SageMaker Studio to connect to EMR on EC2 clusters with runtime roles and AWS Lake Formation-based access control for Apache Spark, Apache Hive, and Presto queries. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.17.0](service/secretsmanager/CHANGELOG.md#v1170-2022-12-29) + * **Feature**: Added owning service filter, include planned deletion flag, and next rotation date response parameter in ListSecrets. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.11.0](service/wisdom/CHANGELOG.md#v1110-2022-12-29) + * **Feature**: This release extends Wisdom CreateContent and StartContentUpload APIs to support PDF and MicrosoftWord docx document uploading. + +# Release (2022-12-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.25.0](service/elasticache/CHANGELOG.md#v1250-2022-12-28) + * **Feature**: This release allows you to modify the encryption in transit setting, for existing Redis clusters. You can now change the TLS configuration of your Redis clusters without the need to re-build or re-provision the clusters or impact application availability. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.21.0](service/networkfirewall/CHANGELOG.md#v1210-2022-12-28) + * **Feature**: AWS Network Firewall now provides status messages for firewalls to help you troubleshoot when your endpoint fails. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.38.0](service/rds/CHANGELOG.md#v1380-2022-12-28) + * **Feature**: This release adds support for Custom Engine Version (CEV) on RDS Custom SQL Server. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.10.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1100-2022-12-28) + * **Feature**: Added support for Python paginators in the route53-recovery-control-config List* APIs. + +# Release (2022-12-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.11.0](service/memorydb/CHANGELOG.md#v1110-2022-12-27) + * **Feature**: This release adds support for MemoryDB Reserved nodes which provides a significant discount compared to on-demand node pricing. Reserved nodes are not physical nodes, but rather a billing discount applied to the use of on-demand nodes in your account. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.27.0](service/transfer/CHANGELOG.md#v1270-2022-12-27) + * **Feature**: Add additional operations to throw ThrottlingExceptions + +# Release (2022-12-23) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.42.0](service/connect/CHANGELOG.md#v1420-2022-12-23) + * **Feature**: Support for Routing Profile filter, SortCriteria, and grouping by Routing Profiles for GetCurrentMetricData API. Support for RoutingProfiles, UserHierarchyGroups, and Agents as filters, NextStatus and AgentStatusName for GetCurrentUserData. Adds ApproximateTotalCount to both APIs. +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.13.0](service/connectparticipant/CHANGELOG.md#v1130-2022-12-23) + * **Feature**: Amazon Connect Chat introduces the Message Receipts feature. 
This feature allows agents and customers to receive message delivered and read receipts after they send a chat message. +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.17.0](service/detective/CHANGELOG.md#v1170-2022-12-23) + * **Feature**: This release adds a missed AccessDeniedException type to several endpoints. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.27.0](service/fsx/CHANGELOG.md#v1270-2022-12-23) + * **Feature**: Fix a bug where a recent release might break certain existing SDKs. +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.10.0](service/inspector2/CHANGELOG.md#v1100-2022-12-23) + * **Feature**: Amazon Inspector adds support for scanning NodeJS 18.x and Go 1.x AWS Lambda function runtimes. + +# Release (2022-12-22) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.19.0](service/computeoptimizer/CHANGELOG.md#v1190-2022-12-22) + * **Feature**: This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for ecs services running on Fargate. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.41.0](service/connect/CHANGELOG.md#v1410-2022-12-22) + * **Feature**: Amazon Connect Chat introduces the Idle Participant/Autodisconnect feature, which allows users to set timeouts relating to the activity of chat participants, using the new UpdateParticipantRoleConfig API. +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.16.0](service/iotdeviceadvisor/CHANGELOG.md#v1160-2022-12-22) + * **Feature**: This release adds the following new features: 1) Documentation updates for IoT Device Advisor APIs. 2) Updated required request parameters for IoT Device Advisor APIs. 3) Added new service feature: ability to provide the test endpoint when customer executing the StartSuiteRun API. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.1.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v110-2022-12-22) + * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.37.0](service/rds/CHANGELOG.md#v1370-2022-12-22) + * **Feature**: Add support for managing master user password in AWS Secrets Manager for the DBInstance and DBCluster. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.11](service/secretsmanager/CHANGELOG.md#v11611-2022-12-22) + * **Documentation**: Documentation updates for Secrets Manager + +# Release (2022-12-21) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.0.0](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v100-2022-12-21) + * **Release**: New AWS service client module + * **Feature**: AWS License Manager now offers cross-region, cross-account tracking of commercial Linux subscriptions on AWS. This includes subscriptions purchased as part of EC2 subscription-included AMIs, on the AWS Marketplace, or brought to AWS via Red Hat Cloud Access Program. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.25.0](service/macie2/CHANGELOG.md#v1250-2022-12-21) + * **Feature**: This release adds support for analyzing Amazon S3 objects that use the S3 Glacier Instant Retrieval (Glacier_IR) storage class. 
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.61.0](service/sagemaker/CHANGELOG.md#v1610-2022-12-21) + * **Feature**: This release enables adding RStudio Workbench support to an existing Amazon SageMaker Studio domain. It allows setting your RStudio on SageMaker environment configuration parameters and also updating the RStudioConnectUrl and RStudioPackageManagerUrl parameters for existing domains +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.33.4](service/ssm/CHANGELOG.md#v1334-2022-12-21) + * **Documentation**: Doc-only updates for December 2022. +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.13.22](service/support/CHANGELOG.md#v11322-2022-12-21) + * **Documentation**: Documentation updates for the AWS Support API +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.26.0](service/transfer/CHANGELOG.md#v1260-2022-12-21) + * **Feature**: This release adds support for Decrypt as a workflow step type. + +# Release (2022-12-20) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.20.0](service/batch/CHANGELOG.md#v1200-2022-12-20) + * **Feature**: Adds isCancelled and isTerminated to DescribeJobs response. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.77.0](service/ec2/CHANGELOG.md#v1770-2022-12-20) + * **Feature**: Adds support for pagination in the EC2 DescribeImages API. +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.16.0](service/lookoutequipment/CHANGELOG.md#v1160-2022-12-20) + * **Feature**: This release adds support for listing inference schedulers by status. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.27.0](service/medialive/CHANGELOG.md#v1270-2022-12-20) + * **Feature**: This release adds support for two new features to AWS Elemental MediaLive. First, you can now burn-in timecodes to your MediaLive outputs. Second, we now now support the ability to decode Dolby E audio when it comes in on an input. +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.15.0](service/nimble/CHANGELOG.md#v1150-2022-12-20) + * **Feature**: Amazon Nimble Studio now supports configuring session storage volumes and persistence, as well as backup and restore sessions through launch profiles. +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.1.0](service/resourceexplorer2/CHANGELOG.md#v110-2022-12-20) + * **Feature**: Documentation updates for AWS Resource Explorer. +* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.13.0](service/route53domains/CHANGELOG.md#v1130-2022-12-20) + * **Feature**: Use Route 53 domain APIs to change owner, create/delete DS record, modify IPS tag, resend authorization. New: AssociateDelegationSignerToDomain, DisassociateDelegationSignerFromDomain, PushDomain, ResendOperationAuthorization. Updated: UpdateDomainContact, ListOperations, CheckDomainTransferability. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.60.0](service/sagemaker/CHANGELOG.md#v1600-2022-12-20) + * **Feature**: Amazon SageMaker Autopilot adds support for new objective metrics in CreateAutoMLJob API. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.24.0](service/transcribe/CHANGELOG.md#v1240-2022-12-20) + * **Feature**: Enable our batch transcription jobs for Swedish and Vietnamese. 
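Of the 2022-12-20 highlights above, the EC2 `DescribeImages` pagination support is the one most likely to change application code. A minimal sketch, assuming the generated `DescribeImagesPaginator` available in `service/ec2` from v1.77.0 onward (the `Owners` filter and page size here are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	// Walk the result set page by page instead of relying on a single
	// unbounded DescribeImages response.
	paginator := ec2.NewDescribeImagesPaginator(client, &ec2.DescribeImagesInput{
		Owners:     []string{"self"},
		MaxResults: aws.Int32(100),
	})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, image := range page.Images {
			fmt.Println(aws.ToString(image.ImageId))
		}
	}
}
```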
+ +# Release (2022-12-19) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.21.0](service/athena/CHANGELOG.md#v1210-2022-12-19) + * **Feature**: Add missed InvalidRequestException in GetCalculationExecutionCode,StopCalculationExecution APIs. Correct required parameters (Payload and Type) in UpdateNotebook API. Change Notebook size from 15 Mb to 10 Mb. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.22.0](service/ecs/CHANGELOG.md#v1220-2022-12-19) + * **Feature**: This release adds support for alarm-based rollbacks in ECS, a new feature that allows customers to add automated safeguards for Amazon ECS service rolling updates. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.14.0](service/kinesisvideo/CHANGELOG.md#v1140-2022-12-19) + * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.0.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v100-2022-12-19) + * **Release**: New AWS service client module + * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.36.0](service/rds/CHANGELOG.md#v1360-2022-12-19) + * **Feature**: Add support for --enable-customer-owned-ip to RDS create-db-instance-read-replica API for RDS on Outposts. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.59.0](service/sagemaker/CHANGELOG.md#v1590-2022-12-19) + * **Feature**: AWS Sagemaker - Sagemaker Images now supports Aliases as secondary identifiers for ImageVersions. SageMaker Images now supports additional metadata for ImageVersions for better images management. + +# Release (2022-12-16) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.22.0](service/appflow/CHANGELOG.md#v1220-2022-12-16) + * **Feature**: This release updates the ListConnectorEntities API action so that it returns paginated responses that customers can retrieve with next tokens. +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.22.2](service/cloudfront/CHANGELOG.md#v1222-2022-12-16) + * **Documentation**: Updated documentation for CloudFront +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.20.0](service/datasync/CHANGELOG.md#v1200-2022-12-16) + * **Feature**: AWS DataSync now supports the use of tags with task executions. With this new feature, you can apply tags each time you execute a task, giving you greater control and management over your task executions. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.18.3](service/efs/CHANGELOG.md#v1183-2022-12-16) + * **Documentation**: General documentation updates for EFS. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.16.6](service/guardduty/CHANGELOG.md#v1166-2022-12-16) + * **Documentation**: This release provides the valid characters for the Description and Name field. 
+* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.2.0](service/iotfleetwise/CHANGELOG.md#v120-2022-12-16) + * **Feature**: Updated error handling for empty resource names in "UpdateSignalCatalog" and "GetModelManifest" operations. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.58.0](service/sagemaker/CHANGELOG.md#v1580-2022-12-16) + * **Feature**: AWS sagemaker - Features: This release adds support for random seed, it's an integer value used to initialize a pseudo-random number generator. Setting a random seed will allow the hyperparameter tuning search strategies to produce more consistent configurations for the same tuning job. + # Release (2022-12-15) ## General Highlights @@ -6199,7 +8977,7 @@ * Fixes [issue#1191](https://github.com/aws/aws-sdk-go-v2/issues/1191) * Refactored internal endpoints model for accessors * Feature: updated to latest models -* New services +* New services * `service/location` - v1.0.0 * `service/lookoutmetrics` - v1.0.0 ## Core SDK Highlights @@ -6283,7 +9061,7 @@ of the AWS SDK for Go v2. Version 2 incorporates customer feedback from version ## Breaking Changes * `aws`: Updated Config.Retryer member to be a func that returns aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033)) - * Updates the SDK's references to Config.Retryer to be a function that returns aws.Retryer value. This ensures that custom retry options specified in the `aws.Config` are scoped to individual client instances. + * Updates the SDK's references to Config.Retryer to be a function that returns aws.Retryer value. This ensures that custom retry options specified in the `aws.Config` are scoped to individual client instances. * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer. * Removes duplicate `Retryer` interface from `retry` package. Single definition is `aws.Retryer` now. * `aws/middleware`: Updates `AddAttemptClockSkewMiddleware` to use appropriate `AddRecordResponseTiming` naming ([#1031](https://github.com/aws/aws-sdk-go-v2/pull/1031)) @@ -6291,7 +9069,7 @@ of the AWS SDK for Go v2. Version 2 incorporates customer feedback from version * `config`: Updated the `WithRetryer` helper to take a function that returns an aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033)) * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer. * `API Clients`: Fix SDK's API client enum constant name generation to have expected casing ([#1020](https://github.com/aws/aws-sdk-go-v2/pull/1020)) - * This updates of the generated enum const value names in API client's `types` package to have the expected casing. Prior to this, enum names were being generated with lowercase names instead of camel case. + * This updates of the generated enum const value names in API client's `types` package to have the expected casing. Prior to this, enum names were being generated with lowercase names instead of camel case. * `API Clients`: Updates SDK's API client request middleware stack values to be scoped to individual operation call ([#1019](https://github.com/aws/aws-sdk-go-v2/pull/1019)) * The API client request middleware stack values were mistakenly allowed to escape to nested API operation calls. This broke the SDK's presigners. * Stack values that should not escape are not scoped to the individual operation call. @@ -6303,7 +9081,7 @@ of the AWS SDK for Go v2. 
Version 2 incorporates customer feedback from version * Adds a PresignClient to the `sts` API client module. Use PresignGetCallerIdentity to obtain presigned URLs for the create presigned URLs for the GetCallerIdentity operation. * Fixes [#1021](https://github.com/aws/aws-sdk-go-v2/issues/1021) * `aws/retry`: Add package documentation for retry package ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033)) - * Adds documentation for the retry package + * Adds documentation for the retry package ## Bug Fixes * `Multiple API Clients`: Fix SDK's generated serde for unmodeled operation input/output ([#1050](https://github.com/aws/aws-sdk-go-v2/pull/1050)) @@ -6401,7 +9179,6 @@ feedback and to take advantage of modern Go language features. * Add support for reading `s3_use_arn_region` from shared config file ([#991](https://github.com/aws/aws-sdk-go-v2/pull/991)) * Add Utility for getting RequestID and HostID of response ([#983](https://github.com/aws/aws-sdk-go-v2/pull/983)) - ## Other changes * Updates branch `HEAD` points from `master` to `main`. * This should not impact your application, but if you have pull requests or forks of the SDK you may need to update the upstream branch your fork is based off of. @@ -6432,7 +9209,6 @@ The `config#LoadDefaultConfig` function has been updated to require a `context.C The v2 SDK corrects its behavior to be inline with the AWS CLI and other AWS SDKs. Refer to https://docs.aws.amazon.com/credref/latest/refdocs/overview.html for more information how to use the shared config and credentials files. - # Release 2020-11-30 ## Breaking Change @@ -6592,7 +9368,6 @@ The `config` module's exported types were trimmed down to add clarity and reduce * `credentials` module released at `v0.1.1` * `ec2imds` module released at `v0.1.1` - # Release 2020-09-28 ## Announcements We’re happy to share the updated clients for the v0.25.0 preview version of the AWS SDK for Go V2. @@ -6632,7 +9407,6 @@ As a part of the refactoring done to v2 preview SDK some components have not bee We expect additional breaking changes to the v2 preview SDK in the coming releases. We expect these changes to focus on organizational, naming, and hardening the SDK's design for future feature capabilities after it is released for general availability. - #### Relocated Packages In this release packages within the SDK were relocated, and in some cases those packages were converted to Go modules. The following is a list of packages have were relocated. @@ -6647,7 +9421,6 @@ The `github.com/aws/aws-sdk-go-v2/credentials` module contains refactored creden * `github.com/aws/aws-sdk-go-v2/processcreds` => `github.com/aws/aws-sdk-go-v2/credentials/processcreds` * `github.com/aws/aws-sdk-go-v2/stscreds` => `github.com/aws/aws-sdk-go-v2/credentials/stscreds` - #### Modularization New modules were added to the v2 preview SDK to allow the components to be versioned independently from each other. This allows your application to depend on specific versions of an API client module, and take discrete updates from the SDK core and other API client modules as desired. @@ -6656,7 +9429,6 @@ New modules were added to the v2 preview SDK to allow the components to be versi * [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) * Module for each API client, e.g. 
[github.com/aws/aws-sdk-go-v2/service/s3](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3) - #### API Clients The following is a list of the major changes to the API client modules @@ -6676,14 +9448,12 @@ result, err := client.Scan(context.TODO(), &dynamodb.ScanInput{ }) ``` - #### Configuration In addition to the `github.com/aws/aws-sdk-go-v2/aws/external` package being made a module at `github.com/aws/aws-sdk-go-v2/config`, the `LoadDefaultAWSConfig` function was renamed to `LoadDefaultConfig`. The `github.com/aws/aws-sdk-go-v2/aws/defaults` package has been removed. Its components have been migrated to the `github.com/aws/aws-sdk-go-v2/aws` package, and `github.com/aws/aws-sdk-go-v2/config` module. - #### Error Handling The `github.com/aws/aws-sdk-go-v2/aws/awserr` package was removed as a part of the SDK error handling refactor. The SDK now uses typed errors built around [Go v1.13](https://golang.org/doc/go1.13#error_wrapping)'s [errors.As](https://pkg.go.dev/errors#As) and [errors.Unwrap](https://pkg.go.dev/errors#Unwrap) features. All SDK error types that wrap other errors implement the `Unwrap` method. Generic v2 preview SDK errors created with `fmt.Errorf` use `%w` to wrap the underlying error. @@ -6726,7 +9496,6 @@ Logging an error value will include information from each wrapped error. For exa > 2020/10/15 16:03:37 operation error DynamoDB: Scan, https response error StatusCode: 400, RequestID: ABCREQUESTID123, ResourceNotFoundException: Requested resource not found - #### Endpoints The `github.com/aws/aws-sdk-go-v2/aws/endpoints` has been removed from the SDK, along with all exported endpoint definitions and iteration behavior. Each generated API client now includes its own endpoint definition internally to the module. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md b/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md index 3b64466870..5b627cfa60 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md @@ -1,4 +1,4 @@ ## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md b/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md index c2fc3b8f5b..5e59bba7ba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md @@ -14,29 +14,28 @@ Jump To: * [Feature Requests](#feature-requests) * [Code Contributions](#code-contributions) - ## How to contribute *Before you send us a pull request, please be sure that:* -1. You're working from the latest source on the master branch. -2. You check existing open, and recently closed, pull requests to be sure +1. You're working from the latest source on the `main` branch. +2. You check existing open, and recently closed, pull requests to be sure that someone else hasn't already addressed the problem. -3. You create an issue before working on a contribution that will take a +3. 
You create an issue before working on a contribution that will take a significant amount of your time. *Creating a Pull Request* 1. Fork the repository. -2. In your fork, make your change in a branch that's based on this repo's master branch. +2. In your fork, make your change in a branch that's based on this repo's `main` branch. 3. Commit the change to your fork, using a clear and descriptive commit message. 4. Create a pull request, answering any questions in the pull request form. -For contributions that will take a significant amount of time, open a new -issue to pitch your idea before you get started. Explain the problem and -describe the content you want to see added to the documentation. Let us know -if you'll write it yourself or if you'd like us to help. We'll discuss your -proposal with you and let you know whether we're likely to accept it. +For contributions that will take a significant amount of time, open a new +issue to pitch your idea before you get started. Explain the problem and +describe the content you want to see added to the documentation. Let us know +if you'll write it yourself or if you'd like us to help. We'll discuss your +proposal with you and let you know whether we're likely to accept it. ## Bug Reports @@ -74,9 +73,9 @@ guidelines prior to filing a bug report. Open an [issue][issues] with the following: -* A short, descriptive title. Ideally, other community members should be able +* A short, descriptive title. Ideally, other community members should be able to get a good idea of the feature just from reading the title. -* A detailed description of the the proposed feature. +* A detailed description of the the proposed feature. * Why it should be added to the SDK. * If possible, example code to illustrate how it should work. * Use Markdown to make the request easier to read; @@ -97,7 +96,7 @@ Please be aware of the following notes prior to opening a pull request: 3. Wherever possible, pull requests should contain tests as appropriate. Bugfixes should contain tests that exercise the corrected behavior (i.e., the - test should fail without the bugfix and pass with it), and new features + test should fail without the bugfix and pass with it), and new features should be accompanied by tests exercising the feature. 4. Pull requests that contain failing tests will not be merged until the test @@ -112,7 +111,7 @@ Please be aware of the following notes prior to opening a pull request: ### Testing -To run the tests locally, running the `make unit` command will `go get` the +To run the tests locally, running the `make unit` command will `go get` the SDK's testing dependencies, and run vet, link and unit tests for the SDK. ``` @@ -129,7 +128,7 @@ go test -tags codegen ./private/... See the `Makefile` for additional testing tags that can be used in testing. -To test on multiple platform the SDK includes several DockerFiles under the +To test on multiple platform the SDK includes several DockerFiles under the `awstesting/sandbox` folder, and associated make recipes to to execute unit testing within environments configured for specific Go versions. @@ -170,9 +169,9 @@ This will result in a patch version change. * `SDK Bugs` - For minor changes that resolve an issue. This will result in a patch version change. 
-[issues]: https://github.com/aws/aws-sdk-go/issues -[pr]: https://github.com/aws/aws-sdk-go/pulls +[issues]: https://github.com/aws/aws-sdk-go-v2/issues +[pr]: https://github.com/aws/aws-sdk-go-v2/pulls [license]: http://aws.amazon.com/apache2.0/ [cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement -[releasenotes]: https://github.com/aws/aws-sdk-go/releases +[releasenotes]: https://github.com/aws/aws-sdk-go-v2/releases diff --git a/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md b/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md index 8490c7d673..4c9be94a2f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md @@ -12,4 +12,4 @@ Past Discussions --- The issues listed here are for documentation purposes, and is used to capture issues and their associated discussions. -[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/master/CODE_OF_CONDUCT.md +[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/main/CODE_OF_CONDUCT.md diff --git a/vendor/github.com/aws/aws-sdk-go-v2/Makefile b/vendor/github.com/aws/aws-sdk-go-v2/Makefile index 4bc9dfaf01..4f74a26541 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/Makefile +++ b/vendor/github.com/aws/aws-sdk-go-v2/Makefile @@ -246,7 +246,6 @@ unit-race-modules-%: "go test ${BUILD_TAGS} ${RUN_NONE} ./..." \ "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..." - unit-modules-%: @# unit command that uses the pattern to define the root path that the @# module testing will start from. Strips off the "unit-modules-" and @@ -408,7 +407,6 @@ bench-modules-%: && go run . -p $(subst _,/,$(subst bench-modules-,,$@)) ${EACHMODULE_FLAGS} \ "go test -timeout=10m -bench . --benchmem ${BUILD_TAGS} ${RUN_NONE} ./..." - ##################### # Release Process # ##################### @@ -499,14 +497,22 @@ list-deps-%: ################### .PHONY: sandbox-tests sandbox-build-% sandbox-run-% sandbox-test-% update-aws-golang-tip -sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-gotip +sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-go1.18 sandbox-test-go1.19 sandbox-test-go1.20 sandbox-test-gotip sandbox-build-%: @# sandbox-build-go1.17 @# sandbox-build-gotip - docker build \ - -f ./internal/awstesting/sandbox/Dockerfile.test.$(subst sandbox-build-,,$@) \ - -t "aws-sdk-go-$(subst sandbox-build-,,$@)" . + @if [ $@ == sandbox-build-gotip ]; then\ + docker build \ + -f ./internal/awstesting/sandbox/Dockerfile.test.gotip \ + -t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\ + else\ + docker build \ + --build-arg GO_VERSION=$(subst sandbox-build-go,,$@) \ + -f ./internal/awstesting/sandbox/Dockerfile.test.goversion \ + -t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\ + fi + sandbox-run-%: sandbox-build-% @# sandbox-run-go1.17 @# sandbox-run-gotip diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt index 5f14d1162e..899129ecc4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt +++ b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt @@ -1,3 +1,3 @@ AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. Copyright 2014-2015 Stripe, Inc. 
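Tying back to the migration guide's Error Handling hunk a few files above: because the v2 SDK's generated errors wrap their causes and implement `Unwrap`, `errors.As` can match a concrete error type anywhere in the chain. A hedged sketch, using the DynamoDB `ResourceNotFoundException` that appears in the guide's sample log output (the table name is a placeholder):

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	_, err = client.Scan(context.TODO(), &dynamodb.ScanInput{
		TableName: aws.String("no-such-table"), // placeholder table name
	})

	// Typed service errors wrap the operation and transport errors, so
	// errors.As finds them regardless of how deeply they are nested.
	var notFound *types.ResourceNotFoundException
	if errors.As(err, &notFound) {
		log.Printf("table missing: %s", notFound.ErrorMessage())
	}
}
```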
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/README.md b/vendor/github.com/aws/aws-sdk-go-v2/README.md index da74d0e333..54626706f1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/README.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/README.md @@ -1,7 +1,6 @@ # AWS SDK for Go v2 -[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) - +[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) `aws-sdk-go-v2` is the v2 AWS SDK for the Go programming language. @@ -87,7 +86,7 @@ func main() { ###### Compile and Execute ```sh $ go run . -Table: +Tables: tableOne tableTwo ``` @@ -97,9 +96,9 @@ tableTwo Please use these community resources for getting help. We use the GitHub issues for tracking bugs and feature requests. -* Ask a question on [StackOverflow](http://stackoverflow.com/) and tag it with the [`aws-sdk-go`](http://stackoverflow.com/questions/tagged/aws-sdk-go) tag. -* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html). +* Ask us a [question](https://github.com/aws/aws-sdk-go-v2/discussions/new?category=q-a) or open a [discussion](https://github.com/aws/aws-sdk-go-v2/discussions/new?category=general). * If you think you may have found a bug, please open an [issue](https://github.com/aws/aws-sdk-go-v2/issues/new/choose). +* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html). This SDK implements AWS service APIs. For general issues regarding the AWS services and their limitations, you may also take a look at the [Amazon Web Services Discussion Forums](https://forums.aws.amazon.com/). @@ -118,7 +117,7 @@ Keeping the list of open issues lean will help us respond in a timely manner. 
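The README hunk above corrects the sample output of the getting-started program from `Table:` to `Tables:`. The full program is not reproduced in this hunk; a minimal sketch consistent with that output, assuming default credential and region resolution, might look like:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatalf("unable to load SDK config, %v", err)
	}
	client := dynamodb.NewFromConfig(cfg)

	resp, err := client.ListTables(context.TODO(), &dynamodb.ListTablesInput{})
	if err != nil {
		log.Fatalf("failed to list tables, %v", err)
	}

	// Matches the corrected README output: a "Tables:" header followed by
	// one table name per line.
	fmt.Println("Tables:")
	for _, name := range resp.TableNames {
		fmt.Println(name)
	}
}
```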
## Feedback and contributing -The v2 SDK will use GitHub [Issues] to track feature requests and issues with the SDK. In addition, we'll use GitHub [Projects] to track large tasks spanning multiple pull requests, such as refactoring the SDK's internal request lifecycle. You can provide feedback to us in several ways. +The v2 SDK will use GitHub [Issues] to track feature requests and issues with the SDK. In addition, we'll use GitHub [Projects] to track large tasks spanning multiple pull requests, such as refactoring the SDK's internal request lifecycle. You can provide feedback to us in several ways. **GitHub issues**. To provide feedback or report bugs, file GitHub [Issues] on the SDK. This is the preferred mechanism to give feedback so that other users can engage in the conversation, +1 issues, etc. Issues you open will be evaluated, and included in our roadmap for the GA launch. @@ -139,8 +138,8 @@ API operation require parameters. [Service Documentation](https://aws.amazon.com/documentation/) - Use this documentation to learn how to interface with AWS services. These guides are -great for getting started with a service, or when looking for more -information about a service. While this document is not required for coding, +great for getting started with a service, or when looking for more +information about a service. While this document is not required for coding, services may supply helpful samples to look out for. [Forum](https://forums.aws.amazon.com/forum.jspa?forumID=293) - Ask questions, get help, and give feedback @@ -151,7 +150,7 @@ services may supply helpful samples to look out for. [Dep]: https://github.com/golang/dep [Issues]: https://github.com/aws/aws-sdk-go-v2/issues [Projects]: https://github.com/aws/aws-sdk-go-v2/projects -[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/master/CHANGELOG.md +[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md [Amazon DynamoDB]: https://aws.amazon.com/dynamodb/ -[design]: https://github.com/aws/aws-sdk-go-v2/blob/master/DESIGN.md +[design]: https://github.com/aws/aws-sdk-go-v2/blob/main/DESIGN.md [license]: http://aws.amazon.com/apache2.0/ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 37c643dd37..4c739fb89f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.3" +const goModuleVersion = "1.17.8" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go index 9d7d3a0cb5..47ebc0f547 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go @@ -36,20 +36,31 @@ type Array struct { memberName string // Elements are stored in values, so we keep track of the list size here. 
size int32 + // Empty lists are encoded as "=", if we add a value later we will + // remove this encoding + emptyValue Value } func newArray(values url.Values, prefix string, flat bool, memberName string) *Array { + emptyValue := newValue(values, prefix, flat) + emptyValue.String("") + return &Array{ values: values, prefix: prefix, flat: flat, memberName: memberName, + emptyValue: emptyValue, } } // Value adds a new element to the Query Array. Returns a Value type used to // encode the array element. func (a *Array) Value() Value { + if a.size == 0 { + delete(a.values, a.emptyValue.key) + } + // Query lists start a 1, so adjust the size first a.size++ prefix := a.prefix diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go index c228f7d878..6975ce6524 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go @@ -21,26 +21,18 @@ func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorCompone if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) } - return ErrorComponents{ - Code: errResponse.Code, - Message: errResponse.Message, - RequestID: errResponse.RequestID, - }, nil + return ErrorComponents(errResponse), nil } var errResponse wrappedErrorResponse if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) } - return ErrorComponents{ - Code: errResponse.Code, - Message: errResponse.Message, - RequestID: errResponse.RequestID, - }, nil + return ErrorComponents(errResponse), nil } // noWrappedErrorResponse represents the error response body with -// no internal ... +// wrapped within Error type wrappedErrorResponse struct { Code string `xml:"Error>Code"` Message string `xml:"Error>Message"` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go index 12a3f0c4fb..d89090ad38 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go @@ -30,10 +30,6 @@ func NewTokenRateLimit(tokens uint) *TokenRateLimit { } } -func isTimeoutError(error) bool { - return false -} - type canceledError struct { Err error } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index 3326289a15..822fc920a7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -11,7 +11,6 @@ import ( awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" smithymiddle "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/transport/http" ) @@ -292,7 +291,7 @@ type retryMetadataKey struct{} // Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues // to clear all stack values. 
func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { - metadata, ok = middleware.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) + metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) return metadata, ok } @@ -301,7 +300,7 @@ func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { // Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues // to clear all stack values. func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context { - return middleware.WithStackValue(ctx, retryMetadataKey{}, metadata) + return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata) } // AddRetryMiddlewaresOptions is the set of options that can be passed to diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go index c695e6fe52..00d7d3eeea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go @@ -95,8 +95,13 @@ func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary { var timeoutErr interface{ Timeout() bool } var urlErr *url.Error var netOpErr *net.OpError + var dnsError *net.DNSError switch { + case errors.As(err, &dnsError): + // NXDOMAIN errors should not be retried + retryable = !dnsError.IsNotFound && dnsError.IsTemporary + case errors.As(err, &conErr) && conErr.ConnectionError(): retryable = true diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go index 85a1d8f032..64c4c4845e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go @@ -7,6 +7,7 @@ var IgnoredHeaders = Rules{ "Authorization": struct{}{}, "User-Agent": struct{}{}, "X-Amzn-Trace-Id": struct{}{}, + "Expect": struct{}{}, }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 04278678e0..15b4d60ded 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,55 @@ +# v1.18.21 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.20 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.19 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.18 (2023-03-16) + +* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. 
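The config v1.18.18 entry just above, together with the `resolve_credentials.go` hunk that follows, allows the web-identity role ARN to come from the functional options rather than only from `AWS_ROLE_ARN` or shared config. A small sketch of what that enables (the ARN is a placeholder; the identity token file is still taken from the environment or shared config):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
)

func main() {
	// RoleARN supplied programmatically via WebIdentityRoleOptions; before
	// the fix referenced above, a missing AWS_ROLE_ARN caused resolution to
	// fail even when the option provided one.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithWebIdentityRoleCredentialOptions(func(o *stscreds.WebIdentityRoleOptions) {
			o.RoleARN = "arn:aws:iam::123456789012:role/example-role" // placeholder ARN
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // use cfg to construct service clients as usual
}
```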
+ +# v1.18.17 (2023-03-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.16 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.15 (2023-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.14 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.13 (2023-02-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.12 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.11 (2023-02-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.10 (2023-01-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2023-01-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.18.8 (2023-01-05) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 89734b1ebf..d0b3e4393c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.8" +const goModuleVersion = "1.18.21" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go index 1bb6addf3a..b21cd30804 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go @@ -384,10 +384,6 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro return fmt.Errorf("token file path is not set") } - if len(roleARN) == 0 { - return fmt.Errorf("role ARN is not set") - } - optFns := []func(*stscreds.WebIdentityRoleOptions){ func(options *stscreds.WebIdentityRoleOptions) { options.RoleSessionName = sessionName @@ -398,11 +394,29 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro if err != nil { return err } + if found { optFns = append(optFns, optFn) } - provider := stscreds.NewWebIdentityRoleProvider(sts.NewFromConfig(*cfg), roleARN, stscreds.IdentityTokenFile(filepath), optFns...) + opts := stscreds.WebIdentityRoleOptions{ + RoleARN: roleARN, + } + + for _, fn := range optFns { + fn(&opts) + } + + if len(opts.RoleARN) == 0 { + return fmt.Errorf("role ARN is not set") + } + + client := opts.Client + if client == nil { + client = sts.NewFromConfig(*cfg) + } + + provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...) 
cfg.Credentials = provider diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 91d150e6a9..6e64481b31 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,51 @@ +# v1.13.20 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.19 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.18 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.17 (2023-03-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.16 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.15 (2023-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.14 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.13 (2023-02-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.12 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.11 (2023-02-01) + +* No change notes available for this release. + +# v1.13.10 (2023-01-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2023-01-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.8 (2023-01-05) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go index 72214bf405..6ed71b42b2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go @@ -11,7 +11,7 @@ // # Loading credentials with the SDK's AWS Config // // The EC2 Instance role credentials provider will automatically be the resolved -// credential provider int he credential chain if no other credential provider is +// credential provider in the credential chain if no other credential provider is // resolved first. // // To explicitly instruct the SDK's credentials resolving to use the EC2 Instance diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 8cbe05de64..2e7471d663 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.8" +const goModuleVersion = "1.13.20" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go index 3921da34cd..fe9345e287 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go @@ -149,12 +149,24 @@ func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *P return p } -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` +// A CredentialProcessResponse is the AWS credentials format that must be +// returned when executing an external credential_process. 
+type CredentialProcessResponse struct { + // As of this writing, the Version key must be set to 1. This might + // increment over time as the structure evolves. + Version int + + // The access key ID that identifies the temporary security credentials. + AccessKeyID string `json:"AccessKeyId"` + + // The secret access key that can be used to sign requests. SecretAccessKey string - SessionToken string - Expiration *time.Time + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken string + + // The date on which the current credentials expire. + Expiration *time.Time } // Retrieve executes the credential process command and returns the @@ -166,7 +178,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { } // Serialize and validate response - resp := &credentialProcessResponse{} + resp := &CredentialProcessResponse{} if err = json.Unmarshal(out, resp); err != nil { return aws.Credentials{Source: ProviderName}, &ProviderError{ Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err), diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 2230c41822..e1a9d9156e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,27 @@ +# v1.13.2 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2023-03-14) + +* **Feature**: Add flag to disable IMDSv1 fallback + +# v1.12.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.21 (2022-12-15) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go index f97730bd93..e55edd992e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go @@ -174,6 +174,16 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // Configure IMDSv1 fallback behavior. By default, the client will attempt + // to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary] + // the client will return any errors encountered from attempting to fetch a token + // instead of silently using the insecure data flow of IMDSv1. + // + // See [configuring IMDS] for more information. + // + // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + EnableFallback aws.Ternary + // provides the caching of API tokens used for operation calls. If unset, // the API token will not be retrieved for the operation. 
tokenProvider *tokenProvider diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index de4ef3eff7..50ae84d71d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.21" +const goModuleVersion = "1.13.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go index 275fade488..5703c6e16a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go @@ -4,12 +4,14 @@ import ( "context" "errors" "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/logging" "net/http" "sync" "sync/atomic" "time" - smithy "github.com/aws/smithy-go" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -68,7 +70,7 @@ func (t *tokenProvider) HandleFinalize( ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - if !t.enabled() { + if t.fallbackEnabled() && !t.enabled() { // short-circuits to insecure data flow if token provider is disabled. return next.HandleFinalize(ctx, input) } @@ -115,23 +117,15 @@ func (t *tokenProvider) HandleDeserialize( } if resp.StatusCode == http.StatusUnauthorized { // unauthorized - err = &retryableError{Err: err} t.enable() + err = &retryableError{Err: err, isRetryable: true} } return out, metadata, err } -type retryableError struct { - Err error -} - -func (*retryableError) RetryableError() bool { return true } - -func (e *retryableError) Error() string { return e.Err.Error() } - func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) { - if !t.enabled() { + if t.fallbackEnabled() && !t.enabled() { return nil, &bypassTokenRetrievalError{ Err: fmt.Errorf("cannot get API token, provider disabled"), } @@ -147,7 +141,7 @@ func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) tok, err = t.updateToken(ctx) if err != nil { - return nil, fmt.Errorf("cannot get API token, %w", err) + return nil, err } return tok, nil @@ -167,17 +161,19 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) { TokenTTL: t.tokenTTL, }) if err != nil { - // change the disabled flag on token provider to true, when error is request timeout error. var statusErr interface{ HTTPStatusCode() int } if errors.As(err, &statusErr) { switch statusErr.HTTPStatusCode() { - - // Disable get token if failed because of 403, 404, or 405 + // Disable future get token if failed because of 403, 404, or 405 case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: - t.disable() + if t.fallbackEnabled() { + logger := middleware.GetLogger(ctx) + logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err) + t.disable() + } // 400 errors are terminal, and need to be upstreamed case http.StatusBadRequest: @@ -192,8 +188,17 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) { atomic.StoreUint32(&t.disabled, 1) } - // Token couldn't be retrieved, but bypass this, and allow the - // request to continue. 
+ if !t.fallbackEnabled() { + // NOTE: getToken() is an implementation detail of some outer operation + // (e.g. GetMetadata). It has its own retries that have already been exhausted. + // Mark the underlying error as a terminal error. + err = &retryableError{Err: err, isRetryable: false} + return nil, err + } + + // Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request + // and allow the request to proceed. Future requests _may_ re-attempt fetching a + // token if not disabled. return nil, &bypassTokenRetrievalError{Err: err} } @@ -206,21 +211,21 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) { return tok, nil } -type bypassTokenRetrievalError struct { - Err error -} - -func (e *bypassTokenRetrievalError) Error() string { - return fmt.Sprintf("bypass token retrieval, %v", e.Err) -} - -func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err } - // enabled returns if the token provider is current enabled or not. func (t *tokenProvider) enabled() bool { return atomic.LoadUint32(&t.disabled) == 0 } +// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise +func (t *tokenProvider) fallbackEnabled() bool { + switch t.client.options.EnableFallback { + case aws.FalseTernary: + return false + default: + return true + } +} + // disable disables the token provider and it will no longer attempt to inject // the token, nor request updates. func (t *tokenProvider) disable() { @@ -235,3 +240,22 @@ func (t *tokenProvider) enable() { t.tokenMux.Unlock() atomic.StoreUint32(&t.disabled, 0) } + +type bypassTokenRetrievalError struct { + Err error +} + +func (e *bypassTokenRetrievalError) Error() string { + return fmt.Sprintf("bypass token retrieval, %v", e.Err) +} + +func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err } + +type retryableError struct { + Err error + isRetryable bool +} + +func (e *retryableError) RetryableError() bool { return e.isRetryable } + +func (e *retryableError) Error() string { return e.Err.Error() } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index cffa7288a2..ee2aa588e2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,23 @@ +# v1.1.32 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.31 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.30 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.29 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.28 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.27 (2022-12-15) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index b47c6baa0a..b95efa5a18 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.27" +const goModuleVersion = "1.1.32" diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index fb3d33ff5e..a241bc1a3a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,23 @@ +# v2.4.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.25 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.4.21 (2022-12-15) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index e6a8286db7..b03a15f67b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.4.21" +const goModuleVersion = "2.4.26" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index efd865d6bb..2d68b862db 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,23 @@ +# v1.3.33 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.32 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.31 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.30 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.29 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.28 (2022-12-15) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 96ac9fbecb..5acd1a1f62 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.28" +const goModuleVersion = "1.3.33" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index ae9ae243fd..7d43e4809e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,23 @@ +# v1.9.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.25 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# 
v1.9.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.9.21 (2022-12-15) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index c49853b92b..9e8b21c0ee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.21" +const goModuleVersion = "1.9.26" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 91054696c7..cf3d97da61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,36 @@ +# v1.12.8 (2023-04-10) + +* No change notes available for this release. + +# v1.12.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.12.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.12.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.0 (2023-01-05) * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index 7bb0698444..6f30ddc994 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -114,7 +114,7 @@ type Options struct { Retryer aws.Retryer // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You // should not populate this structure programmatically, or rely on the values here // within your applications. 
RuntimeEnvironment aws.RuntimeEnvironment diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go index 1c2b7499d5..e3b50cdb29 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -10,8 +10,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the STS short-term credentials for a given role name that is assigned to -// the user. +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) { if params == nil { params = &GetRoleCredentialsInput{} @@ -30,8 +30,7 @@ func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredenti type GetRoleCredentialsInput struct { // The token issued by the CreateToken API call. For more information, see - // CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go index 4fffc77af5..14e753e78a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -30,8 +30,7 @@ func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesI type ListAccountRolesInput struct { // The token issued by the CreateToken API call. For more information, see - // CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go index e717a426c5..7848b01d2d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -12,8 +12,7 @@ import ( ) // Lists all AWS accounts assigned to the user. These AWS accounts are assigned by -// the administrator of the account. For more information, see Assign User Access -// (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// the administrator of the account. For more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) // in the IAM Identity Center User Guide. This operation returns a paginated // response. func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { @@ -34,8 +33,7 @@ func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, op type ListAccountsInput struct { // The token issued by the CreateToken API call. 
For more information, see - // CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go index 8b9b44745e..407f9d1b3b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -19,9 +19,8 @@ import ( // temporary AWS credentials are returned to the client. After user logout, any // existing IAM role sessions that were created by using IAM Identity Center // permission sets continue based on the duration configured in the permission set. -// For more information, see User authentications -// (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) in -// the IAM Identity Center User Guide. +// For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) +// in the IAM Identity Center User Guide. func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { if params == nil { params = &LogoutInput{} @@ -40,8 +39,7 @@ func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func type LogoutInput struct { // The token issued by the CreateToken API call. For more information, see - // CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go index 6a1851da25..8bba205f43 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -86,9 +86,9 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -97,7 +97,7 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -109,8 +109,8 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -242,9 +242,9 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -253,7 +253,7 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -265,8 +265,8 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -407,9 +407,9 @@ func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -418,7 +418,7 @@ func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -430,8 +430,8 @@ func awsRestjson1_deserializeOpErrorListAccounts(response 
*smithyhttp.Response, } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -550,9 +550,9 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -561,7 +561,7 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -573,8 +573,8 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go index f981b154fb..59456d5dc2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go @@ -9,14 +9,13 @@ // and roles assigned to them and get federated into the application. Although AWS // Single Sign-On was renamed, the sso and identitystore API namespaces will // continue to retain their original name for backward compatibility purposes. For -// more information, see IAM Identity Center rename -// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). -// This reference guide describes the IAM Identity Center Portal operations that +// more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) +// . This reference guide describes the IAM Identity Center Portal operations that // you can call programatically and includes detailed information on data types and // errors. AWS provides SDKs that consist of libraries and sample code for various // programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. // The SDKs provide a convenient way to create programmatic access to IAM Identity // Center and other AWS services. For more information about the AWS SDKs, -// including how to download and install them, see Tools for Amazon Web Services -// (http://aws.amazon.com/tools/). +// including how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/) +// . 
package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 3046500f83..639e9bf754 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.0" +const goModuleVersion = "1.12.8" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go index 8f0ac3a293..e97a126e8b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go @@ -27,7 +27,7 @@ func (e *InvalidRequestException) ErrorMessage() string { return *e.Message } func (e *InvalidRequestException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidRequestException" } return *e.ErrorCodeOverride @@ -53,7 +53,7 @@ func (e *ResourceNotFoundException) ErrorMessage() string { return *e.Message } func (e *ResourceNotFoundException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "ResourceNotFoundException" } return *e.ErrorCodeOverride @@ -80,7 +80,7 @@ func (e *TooManyRequestsException) ErrorMessage() string { return *e.Message } func (e *TooManyRequestsException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "TooManyRequestsException" } return *e.ErrorCodeOverride @@ -107,7 +107,7 @@ func (e *UnauthorizedException) ErrorMessage() string { return *e.Message } func (e *UnauthorizedException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "UnauthorizedException" } return *e.ErrorCodeOverride diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go index 051056b759..8dc02296b1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go @@ -26,8 +26,7 @@ type RoleCredentials struct { // The identifier used for the temporary security credentials. For more // information, see Using Temporary Security Credentials to Request Access to AWS - // Resources - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) // in the AWS IAM User Guide. AccessKeyId *string @@ -35,14 +34,12 @@ type RoleCredentials struct { Expiration int64 // The key that is used to sign the request. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) // in the AWS IAM User Guide. SecretAccessKey *string // The token used for temporary credentials. 
For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) // in the AWS IAM User Guide. SessionToken *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 5d6cc1575c..22b00b46e7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,36 @@ +# v1.14.8 (2023-04-10) + +* No change notes available for this release. + +# v1.14.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.14.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.14.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.14.0 (2023-01-05) * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 5e0a85a2c1..111f66d3b9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -114,7 +114,7 @@ type Options struct { Retryer aws.Retryer // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You // should not populate this structure programmatically, or rely on the values here // within your applications. RuntimeEnvironment aws.RuntimeEnvironment diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go index cde97b4f3a..2c2d4393a3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -43,7 +43,7 @@ type CreateTokenInput struct { // Supports grant types for the authorization code, refresh token, and device code // request. For device code requests, specify the following value: - // urn:ietf:params:oauth:grant-type:device_code For information about how to + // urn:ietf:params:oauth:grant-type:device_code For information about how to // obtain the device code, see the StartDeviceAuthorization topic. // // This member is required. 
@@ -65,9 +65,8 @@ type CreateTokenInput struct { // Currently, refreshToken is not yet implemented and is not supported. For more // information about the features and limitations of the current IAM Identity // Center OIDC implementation, see Considerations for Using this Guide in the IAM - // Identity Center OIDC API Reference - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). - // The token used to obtain an access token in the event that the access token is + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . The token used to obtain an access token in the event that the access token is // invalid or expired. RefreshToken *string @@ -89,22 +88,20 @@ type CreateTokenOutput struct { // Currently, idToken is not yet implemented and is not supported. For more // information about the features and limitations of the current IAM Identity // Center OIDC implementation, see Considerations for Using this Guide in the IAM - // Identity Center OIDC API Reference - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). - // The identifier of the user that associated with the access token, if present. + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . The identifier of the user that associated with the access token, if present. IdToken *string // Currently, refreshToken is not yet implemented and is not supported. For more // information about the features and limitations of the current IAM Identity // Center OIDC implementation, see Considerations for Using this Guide in the IAM - // Identity Center OIDC API Reference - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). - // A token that, if present, can be used to refresh a previously issued access + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . A token that, if present, can be used to refresh a previously issued access // token that might have expired. RefreshToken *string // Used to notify the client that the returned token is an access token. The - // supported type is BearerToken. + // supported type is BearerToken . TokenType *string // Metadata pertaining to the operation's result. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go index 3ed8cc35f7..0c8a4b3141 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -40,8 +40,8 @@ type RegisterClientInput struct { // This member is required. ClientType *string - // The list of scopes that are defined by the client. Upon authorization, this list - // is used to restrict permissions when granting an access token. + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. Scopes []string noSmithyDocumentSerde @@ -59,8 +59,8 @@ type RegisterClientOutput struct { // Indicates the time at which the clientId and clientSecret were issued. ClientIdIssuedAt int64 - // A secret string generated for the client. The client will use this string to get - // authenticated by the service in subsequent calls. + // A secret string generated for the client. 
The client will use this string to + // get authenticated by the service in subsequent calls. ClientSecret *string // Indicates the time at which the clientId and clientSecret will become invalid. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go index 013ccbc935..42796d7f02 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -28,9 +28,9 @@ func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDevi type StartDeviceAuthorizationInput struct { - // The unique identifier string for the client that is registered with IAM Identity - // Center. This value should come from the persisted result of the RegisterClient - // API operation. + // The unique identifier string for the client that is registered with IAM + // Identity Center. This value should come from the persisted result of the + // RegisterClient API operation. // // This member is required. ClientId *string @@ -42,8 +42,7 @@ type StartDeviceAuthorizationInput struct { ClientSecret *string // The URL for the AWS access portal. For more information, see Using the AWS - // access portal - // (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) // in the IAM Identity Center User Guide. // // This member is required. @@ -73,9 +72,9 @@ type StartDeviceAuthorizationOutput struct { // device. VerificationUri *string - // An alternate URL that the client can use to automatically launch a browser. This - // process skips the manual step in which the user visits the verification page and - // enters their code. + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. VerificationUriComplete *string // Metadata pertaining to the operation's result. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go index e9939aff0d..ca30d22f97 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -85,9 +85,9 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -96,7 +96,7 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -108,8 +108,8 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -306,9 +306,9 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -317,7 +317,7 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -329,8 +329,8 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -519,9 +519,9 @@ func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -530,7 +530,7 @@ func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -542,8 +542,8 @@ func 
awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go index a025f7327e..2239427d88 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -1,7 +1,7 @@ // Code generated by smithy-go-codegen DO NOT EDIT. -// Package ssooidc provides the API client, operations, and parameter types for AWS -// SSO OIDC. +// Package ssooidc provides the API client, operations, and parameter types for +// AWS SSO OIDC. // // AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect (OIDC) // is a web service that enables a client (such as AWS CLI or a native application) @@ -10,37 +10,27 @@ // with IAM Identity Center. Although AWS Single Sign-On was renamed, the sso and // identitystore API namespaces will continue to retain their original name for // backward compatibility purposes. For more information, see IAM Identity Center -// rename -// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). -// Considerations for Using This Guide Before you begin using this guide, we +// rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) +// . Considerations for Using This Guide Before you begin using this guide, we // recommend that you first review the following important information about how // the IAM Identity Center OIDC service works. +// - The IAM Identity Center OIDC service currently implements only the portions +// of the OAuth 2.0 Device Authorization Grant standard ( +// https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) ) +// that are necessary to enable single sign-on authentication with the AWS CLI. +// Support for other OIDC flows frequently needed for native applications, such as +// Authorization Code Flow (+ PKCE), will be addressed in future releases. +// - The service emits only OIDC access tokens, such that obtaining a new token +// (For example, token refresh) requires explicit user re-authentication. +// - The access tokens provided by this service grant access to all AWS account +// entitlements assigned to an IAM Identity Center user, not just a particular +// application. +// - The documentation in this guide does not describe the mechanism to convert +// the access token into AWS Auth (“sigv4”) credentials for use with IAM-protected +// AWS service endpoints. For more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// in the IAM Identity Center Portal API Reference Guide. // -// * The IAM Identity Center OIDC -// service currently implements only the portions of the OAuth 2.0 Device -// Authorization Grant standard (https://tools.ietf.org/html/rfc8628 -// (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single -// sign-on authentication with the AWS CLI. Support for other OIDC flows frequently -// needed for native applications, such as Authorization Code Flow (+ PKCE), will -// be addressed in future releases. 
-// -// * The service emits only OIDC access tokens, -// such that obtaining a new token (For example, token refresh) requires explicit -// user re-authentication. -// -// * The access tokens provided by this service grant -// access to all AWS account entitlements assigned to an IAM Identity Center user, -// not just a particular application. -// -// * The documentation in this guide does not -// describe the mechanism to convert the access token into AWS Auth (“sigv4”) -// credentials for use with IAM-protected AWS service endpoints. For more -// information, see GetRoleCredentials -// (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) -// in the IAM Identity Center Portal API Reference Guide. -// -// For general information -// about IAM Identity Center, see What is IAM Identity Center? -// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) in the -// IAM Identity Center User Guide. +// For general information about IAM Identity Center, see What is IAM Identity +// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +// in the IAM Identity Center User Guide. package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 6e76b14b37..43ad751086 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.14.0" +const goModuleVersion = "1.14.8" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go index b75cc489d9..115a51a9eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go @@ -29,15 +29,15 @@ func (e *AccessDeniedException) ErrorMessage() string { return *e.Message } func (e *AccessDeniedException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "AccessDeniedException" } return *e.ErrorCodeOverride } func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Indicates that a request to authorize a client with an access user session token -// is pending. +// Indicates that a request to authorize a client with an access user session +// token is pending. 
type AuthorizationPendingException struct { Message *string @@ -59,7 +59,7 @@ func (e *AuthorizationPendingException) ErrorMessage() string { return *e.Message } func (e *AuthorizationPendingException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "AuthorizationPendingException" } return *e.ErrorCodeOverride @@ -89,7 +89,7 @@ func (e *ExpiredTokenException) ErrorMessage() string { return *e.Message } func (e *ExpiredTokenException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "ExpiredTokenException" } return *e.ErrorCodeOverride @@ -119,7 +119,7 @@ func (e *InternalServerException) ErrorMessage() string { return *e.Message } func (e *InternalServerException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InternalServerException" } return *e.ErrorCodeOverride @@ -128,7 +128,7 @@ func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy // Indicates that the clientId or clientSecret in the request is invalid. For // example, this can occur when a client sends an incorrect clientId or an expired -// clientSecret. +// clientSecret . type InvalidClientException struct { Message *string @@ -150,15 +150,15 @@ func (e *InvalidClientException) ErrorMessage() string { return *e.Message } func (e *InvalidClientException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidClientException" } return *e.ErrorCodeOverride } func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Indicates that the client information sent in the request during registration is -// invalid. +// Indicates that the client information sent in the request during registration +// is invalid. 
type InvalidClientMetadataException struct { Message *string @@ -180,7 +180,7 @@ func (e *InvalidClientMetadataException) ErrorMessage() string { return *e.Message } func (e *InvalidClientMetadataException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidClientMetadataException" } return *e.ErrorCodeOverride @@ -210,7 +210,7 @@ func (e *InvalidGrantException) ErrorMessage() string { return *e.Message } func (e *InvalidGrantException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidGrantException" } return *e.ErrorCodeOverride @@ -240,7 +240,7 @@ func (e *InvalidRequestException) ErrorMessage() string { return *e.Message } func (e *InvalidRequestException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidRequestException" } return *e.ErrorCodeOverride @@ -269,7 +269,7 @@ func (e *InvalidScopeException) ErrorMessage() string { return *e.Message } func (e *InvalidScopeException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidScopeException" } return *e.ErrorCodeOverride @@ -299,7 +299,7 @@ func (e *SlowDownException) ErrorMessage() string { return *e.Message } func (e *SlowDownException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "SlowDownException" } return *e.ErrorCodeOverride @@ -329,7 +329,7 @@ func (e *UnauthorizedClientException) ErrorMessage() string { return *e.Message } func (e *UnauthorizedClientException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "UnauthorizedClientException" } return *e.ErrorCodeOverride @@ -358,7 +358,7 @@ func (e *UnsupportedGrantTypeException) ErrorMessage() string { return *e.Message } func (e *UnsupportedGrantTypeException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "UnsupportedGrantTypeException" } return *e.ErrorCodeOverride diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index e624d601e7..8e75829ab1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,40 @@ +# v1.18.9 (2023-04-10) + +* No change notes available for this release. + +# v1.18.8 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.18.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. + +# v1.18.2 (2023-01-25) + +* **Documentation**: Doc only change to update wording in a key topic + +# v1.18.1 (2023-01-23) + +* No change notes available for this release. + # v1.18.0 (2023-01-05) * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
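The ErrorCode methods above now guard the receiver itself (if e == nil || e.ErrorCodeOverride == nil), which is the behavior the sts changelog records as "Prevent nil pointer dereference when retrieving error codes": a typed nil stored in an error interface no longer panics when its code is read. Below is a minimal caller-side sketch, not part of the vendored patch, of how these generated ssooidc error types are typically inspected with errors.As; the helper name describeSSOOIDCError and the sample messages are illustrative assumptions, while the types and methods are the ones defined in the errors.go hunks above.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

// describeSSOOIDCError (hypothetical helper) maps the typed errors from the
// vendored ssooidc/types package shown above to short human-readable strings.
func describeSSOOIDCError(err error) string {
	var slowDown *types.SlowDownException
	if errors.As(err, &slowDown) {
		// ErrorCode falls back to the static code when ErrorCodeOverride is nil,
		// and with the guard added in this patch it is safe even on a nil receiver.
		return fmt.Sprintf("throttled by IAM Identity Center OIDC (%s), retry later", slowDown.ErrorCode())
	}
	var denied *types.AccessDeniedException
	if errors.As(err, &denied) {
		return fmt.Sprintf("access denied (%s): %s", denied.ErrorCode(), denied.ErrorMessage())
	}
	return err.Error()
}

func main() {
	// Simulate an API error as the SDK would surface it to calling code.
	err := &types.SlowDownException{}
	fmt.Println(describeSSOOIDCError(err))
}

The same errors.As pattern applies to every exception type touched in this hunk (InvalidClientException, ExpiredTokenException, and so on), since they all share the generated Message/ErrorCodeOverride shape.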
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 3041fc467e..78eb267020 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -117,7 +117,7 @@ type Options struct { Retryer aws.Retryer // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You // should not populate this structure programmatically, or rely on the values here // within your applications. RuntimeEnvironment aws.RuntimeEnvironment diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go index f4f4f46f44..99d74625d0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -12,21 +12,17 @@ import ( ) // Returns a set of temporary security credentials that you can use to access -// Amazon Web Services resources that you might not normally have access to. These -// temporary credentials consist of an access key ID, a secret access key, and a -// security token. Typically, you use AssumeRole within your account or for -// cross-account access. For a comparison of AssumeRole with other API operations -// that produce temporary credentials, see Requesting Temporary Security -// Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// Amazon Web Services resources. These temporary credentials consist of an access +// key ID, a secret access key, and a security token. Typically, you use AssumeRole +// within your account or for cross-account access. For a comparison of AssumeRole +// with other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRole can be used to make API calls to any Amazon Web Services service with -// the following exception: You cannot call the Amazon Web Services STS +// AssumeRole can be used to make API calls to any Amazon Web Services service +// with the following exception: You cannot call the Amazon Web Services STS // GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. 
The plaintext that you @@ -37,8 +33,7 @@ import ( // credentials in subsequent Amazon Web Services API calls to access resources in // the account that owns the role. You cannot use session policies to grant more // permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. When you create a role, you create two policies: A role // trust policy that specifies who can assume the role and a permissions policy // that specifies what can be done with the role. You specify the trusted principal @@ -49,37 +44,29 @@ import ( // that access to users in the account. A user who wants to access a role in a // different account must also have permissions that are delegated from the user // account administrator. The administrator must attach a policy that allows the -// user to call AssumeRole for the ARN of the role in the other account. To allow a -// user to assume a role in the same account, you can do either of the -// following: +// user to call AssumeRole for the ARN of the role in the other account. To allow +// a user to assume a role in the same account, you can do either of the following: // -// * Attach a policy to the user that allows the user to call -// AssumeRole (as long as the role's trust policy trusts the account). +// - Attach a policy to the user that allows the user to call AssumeRole (as long +// as the role's trust policy trusts the account). +// - Add the user as a principal directly in the role's trust policy. // -// * Add the -// user as a principal directly in the role's trust policy. -// -// You can do either -// because the role’s trust policy acts as an IAM resource-based policy. When a -// resource-based policy grants access to a principal in the same account, no -// additional identity-based policy is required. For more information about trust -// policies and resource-based policies, see IAM Policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) in the -// IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your +// You can do either because the role’s trust policy acts as an IAM resource-based +// policy. When a resource-based policy grants access to a principal in the same +// account, no additional identity-based policy is required. For more information +// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your // session. These tags are called session tags. For more information about session -// tags, see Passing Session Tags in STS -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. An administrator must grant you the permissions necessary to -// pass session tags. The administrator can also create granular permissions to +// tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. An administrator must grant you the permissions necessary +// to pass session tags. The administrator can also create granular permissions to // allow you to pass only specific session tags. 
For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) // in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles with -// Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include -// multi-factor authentication (MFA) information when you call AssumeRole. This is +// multi-factor authentication (MFA) information when you call AssumeRole . This is // useful for cross-account scenarios to ensure that the user that assumes the role // has been authenticated with an Amazon Web Services MFA device. In that scenario, // the trust policy of the role being assumed includes a condition that tests for @@ -87,12 +74,11 @@ import ( // request to assume the role is denied. The condition in a trust policy that tests // for MFA authentication might look like the following example. "Condition": // {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see -// Configuring MFA-Protected API Access -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) in the -// IAM User Guide guide. To use MFA with AssumeRole, you pass values for the -// SerialNumber and TokenCode parameters. The SerialNumber value identifies the -// user's hardware or virtual MFA device. The TokenCode is the time-based one-time -// password (TOTP) that the MFA device produces. +// Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide guide. To use MFA with AssumeRole , you pass values for +// the SerialNumber and TokenCode parameters. The SerialNumber value identifies +// the user's hardware or virtual MFA device. The TokenCode is the time-based +// one-time password (TOTP) that the MFA device produces. func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { if params == nil { params = &AssumeRoleInput{} @@ -144,16 +130,14 @@ type AssumeRoleInput struct { // maximum session duration setting for your role. However, if you assume a role // using role chaining and provide a DurationSeconds parameter value greater than // one hour, the operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. By default, the value is set to 3600 seconds. The // DurationSeconds parameter is separate from the duration of a console session // that you might request using the returned credentials. 
The request to the // federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int32 @@ -166,8 +150,7 @@ type AssumeRoleInput struct { // administrator of the trusted account. That way, only someone with the ID can // assume the role, rather than everyone in the account. For more information about // the external ID, see How to Use an External ID When Granting Access to Your - // Amazon Web Services Resources to a Third Party - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // Amazon Web Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) // in the IAM User Guide. The regex used to validate this parameter is a string of // characters consisting of upper- and lower-case alphanumeric characters with no // spaces. You can also include underscores or any of the following characters: @@ -182,8 +165,7 @@ type AssumeRoleInput struct { // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based // policy of the role that is being assumed. For more information, see Session - // Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. The plaintext that you use for both inline and managed // session policies can't exceed 2,048 characters. The JSON policy characters can // be any ASCII character from the space character to the end of the valid @@ -201,9 +183,8 @@ type AssumeRoleInput struct { // the role. This parameter is optional. You can provide up to 10 managed policy // ARNs. However, the plaintext that you use for both inline and managed session // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. An Amazon Web Services conversion + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion // compresses the passed inline session policy, managed policy ARNs, and session // tags into a packed binary format that has a separate limit. Your request can // fail for this limit even if your plaintext meets the other requirements. The @@ -215,17 +196,16 @@ type AssumeRoleInput struct { // Services API calls to access resources in the account that owns the role. You // cannot use session policies to grant more permissions than those allowed by the // identity-based policy of the role that is being assumed. 
For more information, - // see Session Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. PolicyArns []types.PolicyDescriptorType - // The identification number of the MFA device that is associated with the user who - // is making the AssumeRole call. Specify this value if the trust policy of the - // role being assumed includes a condition that requires MFA authentication. The - // value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as - // arn:aws:iam::123456789012:mfa/user). The regex used to validate this parameter + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy of + // the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as + // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as + // arn:aws:iam::123456789012:mfa/user ). The regex used to validate this parameter // is a string of characters consisting of upper- and lower-case alphanumeric // characters with no spaces. You can also include underscores or any of the // following characters: =,.@- @@ -238,24 +218,21 @@ type AssumeRoleInput struct { // who took actions with a role. You can use the aws:SourceIdentity condition key // to further control access to Amazon Web Services resources based on the value of // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) // in the IAM User Guide. The regex used to validate this parameter is a string of // characters consisting of upper- and lower-case alphanumeric characters with no // spaces. You can also include underscores or any of the following characters: - // =,.@-. You cannot use a value that begins with the text aws:. This prefix is + // =,.@-. You cannot use a value that begins with the text aws: . This prefix is // reserved for Amazon Web Services internal use. SourceIdentity *string - // A list of session tags that you want to pass. Each session tag consists of a key - // name and an associated value. For more information about session tags, see - // Tagging Amazon Web Services STS Sessions - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the - // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. - // The plaintext session tag keys can’t exceed 128 characters, and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // A list of session tags that you want to pass. Each session tag consists of a + // key name and an associated value. 
For more information about session tags, see + // Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. This parameter is optional. You can pass up to 50 session + // tags. The plaintext session tag keys can’t exceed 128 characters, and the values + // can’t exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. An Amazon Web Services conversion compresses the passed // inline session policy, managed policy ARNs, and session tags into a packed // binary format that has a separate limit. Your request can fail for this limit @@ -265,16 +242,15 @@ type AssumeRoleInput struct { // same key as a tag that is already attached to the role. When you do, session // tags override a role tag with the same key. Tag key–value pairs are not case // sensitive, but case is preserved. This means that you cannot have separate - // Department and department tag keys. Assume that the role has the - // Department=Marketing tag and you pass the department=engineering session tag. - // Department and department are not saved as separate tags, and the session tag - // passed in the request takes precedence over the role tag. Additionally, if you - // used temporary credentials to perform this operation, the new session inherits - // any transitive session tags from the calling session. If you pass a session tag - // with the same key as an inherited tag, the operation fails. To view the - // inherited tags for a session, see the CloudTrail logs. For more information, see - // Viewing Session Tags in CloudTrail - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // Department and department tag keys. Assume that the role has the Department = + // Marketing tag and you pass the department = engineering session tag. Department + // and department are not saved as separate tags, and the session tag passed in + // the request takes precedence over the role tag. Additionally, if you used + // temporary credentials to perform this operation, the new session inherits any + // transitive session tags from the calling session. If you pass a session tag with + // the same key as an inherited tag, the operation fails. To view the inherited + // tags for a session, see the CloudTrail logs. For more information, see Viewing + // Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) // in the IAM User Guide. Tags []types.Tag @@ -286,11 +262,10 @@ type AssumeRoleInput struct { // sequence of six numeric digits. TokenCode *string - // A list of keys for session tags that you want to set as transitive. If you set a - // tag key as transitive, the corresponding key and value passes to subsequent + // A list of keys for session tags that you want to set as transitive. If you set + // a tag key as transitive, the corresponding key and value passes to subsequent // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. This parameter is optional. 
When you set session tags as // transitive, the session policy and session tags packed binary limit is not // affected. If you choose not to specify a transitive tag key, then no tags are @@ -309,7 +284,7 @@ type AssumeRoleOutput struct { // that you can use to refer to the resulting temporary security credentials. For // example, you can reference these credentials as a principal in a resource-based // policy by using the ARN or assumed role ID. The ARN and ID include the - // RoleSessionName that you specified when you called AssumeRole. + // RoleSessionName that you specified when you called AssumeRole . AssumedRoleUser *types.AssumedRoleUser // The temporary security credentials, which include an access key ID, a secret @@ -331,8 +306,7 @@ type AssumeRoleOutput struct { // who took actions with a role. You can use the aws:SourceIdentity condition key // to further control access to Amazon Web Services resources based on the value of // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) // in the IAM User Guide. The regex used to validate this parameter is a string of // characters consisting of upper- and lower-case alphanumeric characters with no // spaces. You can also include underscores or any of the following characters: diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go index 4ed0f5d07f..4c62d77e31 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -15,10 +15,8 @@ import ( // mechanism for tying an enterprise identity store or directory to role-based // Amazon Web Services access without user-specific credentials or configuration. // For a comparison of AssumeRoleWithSAML with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. The temporary security credentials returned by this // operation consist of an access key ID, a secret access key, and a security // token. Applications can use these temporary security credentials to sign calls @@ -31,15 +29,12 @@ import ( // DurationSeconds value from 900 seconds (15 minutes) up to the maximum session // duration setting for the role. This setting can have a value from 1 hour to 12 // hours. 
To learn how to view the maximum value for your role, see View the -// Maximum Session Duration Setting for a Role -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you use // the AssumeRole* API operations or the assume-role* CLI commands. However the // limit does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM -// User Guide. Role chaining -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) // limits your CLI or Amazon Web Services API role session to a maximum of one // hour. When you use the AssumeRole API operation to assume a role, you can // specify the duration of your role session with the DurationSeconds parameter. @@ -50,8 +45,7 @@ import ( // credentials created by AssumeRoleWithSAML can be used to make API calls to any // Amazon Web Services service with the following exception: you cannot call the // STS GetFederationToken or GetSessionToken API operations. (Optional) You can -// pass inline or managed session policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. The plaintext that you @@ -62,8 +56,7 @@ import ( // credentials in subsequent Amazon Web Services API calls to access resources in // the account that owns the role. You cannot use session policies to grant more // permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of // Amazon Web Services security credentials. The identity of the caller is // validated by using keys in the metadata document that is uploaded for the SAML @@ -71,16 +64,14 @@ import ( // result in an entry in your CloudTrail logs. The entry includes the value in the // NameID element of the SAML assertion. We recommend that you use a NameIDType // that is not associated with any personally identifiable information (PII). For -// example, you could instead use the persistent identifier -// (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). Tags (Optional) You can +// example, you could instead use the persistent identifier ( +// urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). 
Tags (Optional) You can // configure your IdP to pass attributes into your SAML assertion as session tags. // Each session tag consists of a key name and an associated value. For more -// information about session tags, see Passing Session Tags in STS -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. You can pass up to 50 session tags. The plaintext session tag -// keys can’t exceed 128 characters and the values can’t exceed 256 characters. For -// these and additional limits, see IAM and STS Character Limits -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session +// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. +// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. An Amazon Web Services conversion compresses the passed // inline session policy, managed policy ARNs, and session tags into a packed // binary format that has a separate limit. Your request can fail for this limit @@ -91,36 +82,25 @@ import ( // override the role's tags with the same key. An administrator must grant you the // permissions necessary to pass session tags. The administrator can also create // granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) // in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles with -// Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. SAML Configuration Before your application can call -// AssumeRoleWithSAML, you must configure your SAML identity provider (IdP) to +// AssumeRoleWithSAML , you must configure your SAML identity provider (IdP) to // issue the claims required by Amazon Web Services. Additionally, you must use // Identity and Access Management (IAM) to create a SAML provider entity in your // Amazon Web Services account that represents your identity provider. You must // also create an IAM role that specifies this SAML provider in its trust policy. // For more information, see the following resources: -// -// * About SAML 2.0-based -// Federation -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// -// * Creating SAML Identity Providers -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. 
-// -// * Configuring a Relying Party and Claims -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// -// * Creating a Role for SAML 2.0 Federation -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. +// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { if params == nil { params = &AssumeRoleWithSAMLInput{} @@ -150,8 +130,7 @@ type AssumeRoleWithSAMLInput struct { RoleArn *string // The base64 encoded SAML authentication response provided by the IdP. For more - // information, see Configuring a Relying Party and Adding Claims - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) // in the IAM User Guide. // // This member is required. @@ -166,16 +145,14 @@ type AssumeRoleWithSAMLInput struct { // than this setting, the operation fails. For example, if you specify a session // duration of 12 hours, but your administrator set the maximum session duration to // 6 hours, your operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. By default, the value is set to 3600 seconds. The // DurationSeconds parameter is separate from the duration of a console session // that you might request using the returned credentials. The request to the // federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int32 @@ -187,8 +164,7 @@ type AssumeRoleWithSAMLInput struct { // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based // policy of the role that is being assumed. 
For more information, see Session - // Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. The plaintext that you use for both inline and managed // session policies can't exceed 2,048 characters. The JSON policy characters can // be any ASCII character from the space character to the end of the valid @@ -206,9 +182,8 @@ type AssumeRoleWithSAMLInput struct { // the role. This parameter is optional. You can provide up to 10 managed policy // ARNs. However, the plaintext that you use for both inline and managed session // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. An Amazon Web Services conversion + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion // compresses the passed inline session policy, managed policy ARNs, and session // tags into a packed binary format that has a separate limit. Your request can // fail for this limit even if your plaintext meets the other requirements. The @@ -220,8 +195,7 @@ type AssumeRoleWithSAMLInput struct { // Services API calls to access resources in the account that owns the role. You // cannot use session policies to grant more permissions than those allowed by the // identity-based policy of the role that is being assumed. For more information, - // see Session Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. PolicyArns []types.PolicyDescriptorType @@ -251,19 +225,12 @@ type AssumeRoleWithSAMLOutput struct { Issuer *string // A hash value based on the concatenation of the following: - // - // * The Issuer response - // value. - // - // * The Amazon Web Services account ID. - // - // * The friendly name (the last - // part of the ARN) of the SAML provider in IAM. - // - // The combination of NameQualifier - // and Subject can be used to uniquely identify a federated user. The following - // pseudocode shows how the hash value is calculated: BASE64 ( SHA1 ( - // "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + // - The Issuer response value. + // - The Amazon Web Services account ID. + // - The friendly name (the last part of the ARN) of the SAML provider in IAM. + // The combination of NameQualifier and Subject can be used to uniquely identify a + // federated user. The following pseudocode shows how the hash value is calculated: + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) NameQualifier *string // A percentage value that indicates the packed size of the session policies and @@ -272,20 +239,18 @@ type AssumeRoleWithSAMLOutput struct { // allowed space. PackedPolicySize *int32 - // The value in the SourceIdentity attribute in the SAML assertion. You can require - // users to set a source identity value when they assume a role. 
You do this by - // using the sts:SourceIdentity condition key in a role trust policy. That way, - // actions that are taken with the role are associated with that user. After the - // source identity is set, the value cannot be changed. It is present in the + // The value in the SourceIdentity attribute in the SAML assertion. You can + // require users to set a source identity value when they assume a role. You do + // this by using the sts:SourceIdentity condition key in a role trust policy. That + // way, actions that are taken with the role are associated with that user. After + // the source identity is set, the value cannot be changed. It is present in the // request for all actions that are taken by the role and persists across chained - // role - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) // sessions. You can configure your SAML identity provider to use an attribute // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithSAML. You do this by adding an attribute to the SAML + // calling AssumeRoleWithSAML . You do this by adding an attribute to the SAML // assertion. For more information about using source identity, see Monitor and - // control actions taken with assumed roles - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) // in the IAM User Guide. The regex used to validate this parameter is a string of // characters consisting of upper- and lower-case alphanumeric characters with no // spaces. You can also include underscores or any of the following characters: @@ -297,10 +262,10 @@ type AssumeRoleWithSAMLOutput struct { // The format of the name ID, as defined by the Format attribute in the NameID // element of the SAML assertion. Typical examples of the format are transient or - // persistent. If the format includes the prefix - // urn:oasis:names:tc:SAML:2.0:nameid-format, that prefix is removed. For example, - // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient. If - // the format includes any other prefix, the format is returned with no + // persistent . If the format includes the prefix + // urn:oasis:names:tc:SAML:2.0:nameid-format , that prefix is removed. For example, + // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . + // If the format includes any other prefix, the format is returned with no // modifications. SubjectType *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go index e2ff4ac62e..870f3a583d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -14,19 +14,15 @@ import ( // authenticated in a mobile or web application with a web identity provider. 
// Example providers include the OAuth 2.0 providers Login with Amazon and // Facebook, or any OpenID Connect-compatible identity provider such as Google or -// Amazon Cognito federated identities -// (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html). -// For mobile applications, we recommend that you use Amazon Cognito. You can use -// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide -// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android -// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify a -// user. You can also supply the user with a consistent identity throughout the -// lifetime of an application. To learn more about Amazon Cognito, see Amazon -// Cognito Overview -// (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// Amazon Cognito federated identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// . For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. To learn more about Amazon +// Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) // in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito -// Overview -// (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) // in the Amazon Web Services SDK for iOS Developer Guide. Calling // AssumeRoleWithWebIdentity does not require the use of Amazon Web Services // security credentials. Therefore, you can distribute an application (for example, @@ -36,32 +32,28 @@ import ( // Services credentials. Instead, the identity of the caller is validated by using // a token from the web identity provider. For a comparison of // AssumeRoleWithWebIdentity with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. The temporary security credentials returned by this API // consist of an access key ID, a secret access key, and a security token. // Applications can use these temporary security credentials to sign calls to // Amazon Web Services service API operations. Session Duration By default, the -// temporary security credentials created by AssumeRoleWithWebIdentity last for one -// hour. However, you can use the optional DurationSeconds parameter to specify the -// duration of your session. 
You can provide a value from 900 seconds (15 minutes) -// up to the maximum session duration setting for the role. This setting can have a -// value from 1 hour to 12 hours. To learn how to view the maximum value for your -// role, see View the Maximum Session Duration Setting for a Role -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// temporary security credentials created by AssumeRoleWithWebIdentity last for +// one hour. However, you can use the optional DurationSeconds parameter to +// specify the duration of your session. You can provide a value from 900 seconds +// (15 minutes) up to the maximum session duration setting for the role. This +// setting can have a value from 1 hour to 12 hours. To learn how to view the +// maximum value for your role, see View the Maximum Session Duration Setting for +// a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you use // the AssumeRole* API operations or the assume-role* CLI commands. However the // limit does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM -// User Guide. Permissions The temporary security credentials created by +// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. Permissions The temporary security credentials created by // AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web // Services service with the following exception: you cannot call the STS // GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. The plaintext that you @@ -72,17 +64,14 @@ import ( // credentials in subsequent Amazon Web Services API calls to access resources in // the account that owns the role. You cannot use session policies to grant more // permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. Tags (Optional) You can configure your IdP to pass // attributes into your web identity token as session tags. Each session tag // consists of a key name and an associated value. For more information about -// session tags, see Passing Session Tags in STS -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. You can pass up to 50 session tags. The plaintext session tag -// keys can’t exceed 128 characters and the values can’t exceed 256 characters. 
For -// these and additional limits, see IAM and STS Character Limits -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session +// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. +// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. An Amazon Web Services conversion compresses the passed // inline session policy, managed policy ARNs, and session tags into a packed // binary format that has a separate limit. Your request can fail for this limit @@ -93,52 +82,38 @@ import ( // overrides the role tag with the same key. An administrator must grant you the // permissions necessary to pass session tags. The administrator can also create // granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) // in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles with -// Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. Identities Before your application can call -// AssumeRoleWithWebIdentity, you must have an identity token from a supported +// AssumeRoleWithWebIdentity , you must have an identity token from a supported // identity provider and create a role that the application can assume. The role // that your application assumes must trust the identity provider that is // associated with the identity token. In other words, the identity provider must // be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can -// result in an entry in your CloudTrail logs. The entry includes the Subject -// (http://openid.net/specs/openid-connect-core-1_0.html#Claims) of the provided -// web identity token. We recommend that you avoid using any personally -// identifiable information (PII) in this field. For example, you could instead use -// a GUID or a pairwise identifier, as suggested in the OIDC specification -// (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). For more -// information about how to use web identity federation and the +// result in an entry in your CloudTrail logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided web identity token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. 
For example, you could +// instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes) +// . For more information about how to use web identity federation and the // AssumeRoleWithWebIdentity API, see the following resources: -// -// * Using Web -// Identity Federation API Operations for Mobile Apps -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// * -// Web Identity Federation Playground -// (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). -// Walk through the process of authenticating through Login with Amazon, Facebook, -// or Google, getting temporary security credentials, and then using those -// credentials to make a request to Amazon Web Services. -// -// * Amazon Web Services SDK -// for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and Amazon Web -// Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). -// These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. -// -// * Web Identity -// Federation with Mobile Applications -// (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). -// This article discusses web identity federation and shows an example of how to -// use web identity federation to get access to content in Amazon S3. +// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// . +// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/) +// . Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then using +// those credentials to make a request to Amazon Web Services. +// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// . These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications) +// . This article discusses web identity federation and shows an example of how to +// use web identity federation to get access to content in Amazon S3. func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { if params == nil { params = &AssumeRoleWithWebIdentityInput{} @@ -187,16 +162,14 @@ type AssumeRoleWithWebIdentityInput struct { // higher than this setting, the operation fails. 
For example, if you specify a // session duration of 12 hours, but your administrator set the maximum session // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a Role - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // value for your role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. By default, the value is set to 3600 seconds. The // DurationSeconds parameter is separate from the duration of a console session // that you might request using the returned credentials. The request to the // federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int32 @@ -208,8 +181,7 @@ type AssumeRoleWithWebIdentityInput struct { // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based // policy of the role that is being assumed. For more information, see Session - // Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. The plaintext that you use for both inline and managed // session policies can't exceed 2,048 characters. The JSON policy characters can // be any ASCII character from the space character to the end of the valid @@ -227,9 +199,8 @@ type AssumeRoleWithWebIdentityInput struct { // the role. This parameter is optional. You can provide up to 10 managed policy // ARNs. However, the plaintext that you use for both inline and managed session // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. An Amazon Web Services conversion + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion // compresses the passed inline session policy, managed policy ARNs, and session // tags into a packed binary format that has a separate limit. Your request can // fail for this limit even if your plaintext meets the other requirements. The @@ -241,8 +212,7 @@ type AssumeRoleWithWebIdentityInput struct { // Services API calls to access resources in the account that owns the role. You // cannot use session policies to grant more permissions than those allowed by the // identity-based policy of the role that is being assumed. 
For more information, - // see Session Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. PolicyArns []types.PolicyDescriptorType @@ -265,7 +235,7 @@ type AssumeRoleWithWebIdentityOutput struct { // that you can use to refer to the resulting temporary security credentials. For // example, you can reference these credentials as a principal in a resource-based // policy by using the ARN or assumed role ID. The ARN and ID include the - // RoleSessionName that you specified when you called AssumeRole. + // RoleSessionName that you specified when you called AssumeRole . AssumedRoleUser *types.AssumedRoleUser // The intended audience (also known as client ID) of the web identity token. This @@ -285,10 +255,10 @@ type AssumeRoleWithWebIdentityOutput struct { // allowed space. PackedPolicySize *int32 - // The issuing authority of the web identity token presented. For OpenID Connect ID - // tokens, this contains the value of the iss field. For OAuth 2.0 access tokens, - // this contains the value of the ProviderId parameter that was passed in the - // AssumeRoleWithWebIdentity request. + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed in + // the AssumeRoleWithWebIdentity request. Provider *string // The value of the source identity that is returned in the JSON web token (JWT) @@ -297,17 +267,14 @@ type AssumeRoleWithWebIdentityOutput struct { // key in a role trust policy. That way, actions that are taken with the role are // associated with that user. After the source identity is set, the value cannot be // changed. It is present in the request for all actions that are taken by the role - // and persists across chained role - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // and persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) // sessions. You can configure your identity provider to use an attribute // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web - // token. To learn more about OIDC tokens and claims, see Using Tokens with User - // Pools - // (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON + // web token. To learn more about OIDC tokens and claims, see Using Tokens with + // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) // in the Amazon Cognito Developer Guide. For more information about using source - // identity, see Monitor and control actions taken with assumed roles - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) // in the IAM User Guide. 
The regex used to validate this parameter is a string of // characters consisting of upper- and lower-case alphanumeric characters with no // spaces. You can also include underscores or any of the following characters: diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go index b7a637d420..2dcc311081 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -22,27 +22,17 @@ import ( // encoded because the details of the authorization status can contain privileged // information that the user who requested the operation should not see. To decode // an authorization status message, a user must be granted permissions through an -// IAM policy -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) to -// request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action. -// The decoded message includes the following type of information: -// -// * Whether the -// request was denied due to an explicit deny or due to the absence of an explicit -// allow. For more information, see Determining Whether a Request is Allowed or -// Denied -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. -// -// * The principal who made the request. -// -// * The requested -// action. -// -// * The requested resource. -// -// * The values of condition keys in the -// context of the user's request. +// IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// to request the DecodeAuthorizationMessage ( sts:DecodeAuthorizationMessage ) +// action. The decoded message includes the following type of information: +// - Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether a +// Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// - The principal who made the request. +// - The requested action. +// - The requested resource. +// - The values of condition keys in the context of the user's request. func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { if params == nil { params = &DecodeAuthorizationMessageInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go index b86a425d0a..13aef999e6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -11,21 +11,18 @@ import ( ) // Returns the account identifier for the specified access key ID. Access keys -// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) and a -// secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). 
For -// more information about access keys, see Managing Access Keys for IAM Users -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE ) and +// a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). +// For more information about access keys, see Managing Access Keys for IAM Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) // in the IAM User Guide. When you pass an access key ID to this operation, it // returns the ID of the Amazon Web Services account to which the keys belong. // Access key IDs beginning with AKIA are long-term credentials for an IAM user or // the Amazon Web Services account root user. Access key IDs beginning with ASIA // are temporary credentials that are created using STS operations. If the account // in the response belongs to you, you can sign in as the root user and review your -// root user access keys. Then, you can pull a credentials report -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// root user access keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) // to learn which IAM user owns the keys. To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail logs -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// credentials for an ASIA access key, view the STS events in your CloudTrail logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) // in the IAM User Guide. This operation does not indicate the state of the access // key. The key might be active, inactive, or deleted. Active keys might not have // permissions to perform an operation. Providing a deleted access key might return diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go index a7f96c2201..ca14ae8894 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -15,9 +15,8 @@ import ( // administrator adds a policy to your IAM user or role that explicitly denies // access to the sts:GetCallerIdentity action, you can still perform this // operation. Permissions are not required because the same information is returned -// when an IAM user or role is denied access. To view an example response, see I Am -// Not Authorized to Perform: iam:DeleteVirtualMFADevice -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// when an IAM user or role is denied access. To view an example response, see I +// Am Not Authorized to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) // in the IAM User Guide. func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { if params == nil { @@ -49,10 +48,9 @@ type GetCallerIdentityOutput struct { // The Amazon Web Services ARN associated with the calling entity. Arn *string - // The unique identifier of the calling entity. 
The exact value depends on the type - // of entity that is making the call. The values returned are those listed in the - // aws:userid column in the Principal table - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed in + // the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) // found on the Policy Variables reference page in the IAM User Guide. UserId *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go index 60026a1393..9f31731c35 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -11,49 +11,41 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a set of temporary security credentials (consisting of an access key ID, -// a secret access key, and a security token) for a federated user. A typical use -// is in a proxy application that gets temporary security credentials on behalf of -// distributed applications inside a corporate network. You must call the +// Returns a set of temporary security credentials (consisting of an access key +// ID, a secret access key, and a security token) for a federated user. A typical +// use is in a proxy application that gets temporary security credentials on behalf +// of distributed applications inside a corporate network. You must call the // GetFederationToken operation using the long-term security credentials of an IAM // user. As a result, this call is appropriate in contexts where those credentials // can be safely stored, usually in a server-based application. For a comparison of // GetFederationToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. You can create a mobile-based or browser-based app that // can authenticate users using a web identity provider like Login with Amazon, // Facebook, Google, or an OpenID Connect-compatible identity provider. In this // case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity. For more information, see Federation Through a -// Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// or AssumeRoleWithWebIdentity . For more information, see Federation Through a +// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. 
You can also call GetFederationToken using the security // credentials of an Amazon Web Services account root user, but we do not recommend // it. Instead, we recommend that you create an IAM user for the purpose of the // proxy application. Then attach a policy to the IAM user that limits federated // users to only the actions and resources that they need to access. For more -// information, see IAM Best Practices -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) in the -// IAM User Guide. Session duration The temporary credentials are valid for the -// specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 +// information, see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. Session duration The temporary credentials are valid for +// the specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 // seconds (36 hours). The default session duration is 43,200 seconds (12 hours). // Temporary credentials obtained by using the Amazon Web Services account root // user credentials have a maximum duration of 3,600 seconds (1 hour). Permissions // You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service except the following: +// Amazon Web Services service with the following exceptions: +// - You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. This limitation does not apply to console sessions. +// - You cannot call any STS operations except GetCallerIdentity . // -// * You cannot call any IAM -// operations using the CLI or the Amazon Web Services API. -// -// * You cannot call any -// STS operations except GetCallerIdentity. -// -// You must pass an inline or managed -// session policy -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// You can use temporary credentials for single sign-on (SSO) to the console. You +// must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. The plaintext that you @@ -64,38 +56,33 @@ import ( // policies and the session policies that you pass. This gives you a way to further // restrict the permissions for a federated user. You cannot use session policies // to grant more permissions than those that are defined in the permissions policy -// of the IAM user. For more information, see Session Policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// of the IAM user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. For information about using GetFederationToken to create // temporary security credentials, see GetFederationToken—Federation Through a -// Custom Identity Broker -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// You can use the credentials to access a resource that has a resource-based +// Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken) +// . 
You can use the credentials to access a resource that has a resource-based // policy. If that policy specifically references the federated user session in the // Principal element of the policy, the session has the permissions allowed by the // policy. These permissions are granted in addition to the permissions granted by // the session policies. Tags (Optional) You can pass tag key-value pairs to your // session. These are called session tags. For more information about session tags, -// see Passing Session Tags in STS -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. You can create a mobile-based or browser-based app that can -// authenticate users using a web identity provider like Login with Amazon, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can create a mobile-based or browser-based app that +// can authenticate users using a web identity provider like Login with Amazon, // Facebook, Google, or an OpenID Connect-compatible identity provider. In this // case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity. For more information, see Federation Through a -// Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// or AssumeRoleWithWebIdentity . For more information, see Federation Through a +// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. An administrator must grant you the permissions necessary // to pass session tags. The administrator can also create granular permissions to // allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) // in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is // preserved. This means that you cannot have separate Department and department -// tag keys. Assume that the user that you are federating has the -// Department=Marketing tag and you pass the department=engineering session tag. -// Department and department are not saved as separate tags, and the session tag -// passed in the request takes precedence over the user tag. +// tag keys. Assume that the user that you are federating has the Department = +// Marketing tag and you pass the department = engineering session tag. Department +// and department are not saved as separate tags, and the session tag passed in +// the request takes precedence over the user tag. func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { if params == nil { params = &GetFederationTokenInput{} @@ -114,26 +101,27 @@ func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTo type GetFederationTokenInput struct { // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob). 
For example, you can reference the - // federated user name in a resource-based policy, such as in an Amazon S3 bucket - // policy. The regex used to validate this parameter is a string of characters - // consisting of upper- and lower-case alphanumeric characters with no spaces. You - // can also include underscores or any of the following characters: =,.@- + // temporary security credentials (such as Bob ). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon S3 + // bucket policy. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- // // This member is required. Name *string - // The duration, in seconds, that the session should last. Acceptable durations for - // federation sessions range from 900 seconds (15 minutes) to 129,600 seconds (36 - // hours), with 43,200 seconds (12 hours) as the default. Sessions obtained using - // Amazon Web Services account root user credentials are restricted to a maximum of - // 3,600 seconds (one hour). If the specified duration is longer than one hour, the - // session obtained by using root user credentials defaults to one hour. + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using Amazon Web Services account root user credentials are restricted to a + // maximum of 3,600 seconds (one hour). If the specified duration is longer than + // one hour, the session obtained by using root user credentials defaults to one + // hour. DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. - // You must pass an inline or managed session policy - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. This parameter is @@ -143,8 +131,7 @@ type GetFederationTokenInput struct { // session policies that you pass. This gives you a way to further restrict the // permissions for a federated user. You cannot use session policies to grant more // permissions than those that are defined in the permissions policy of the IAM - // user. For more information, see Session Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. The resulting credentials can be used to access a // resource that has a resource-based policy. If that policy specifically // references the federated user session in the Principal element of the policy, @@ -165,24 +152,21 @@ type GetFederationTokenInput struct { // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as a managed session policy. 
The policies must exist in the same account as // the IAM user that is requesting federated access. You must pass an inline or - // managed session policy - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. The plaintext that you // use for both inline and managed session policies can't exceed 2,048 characters. // You can provide up to 10 managed policy ARNs. For more information about ARNs, - // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. This parameter is optional. However, - // if you do not pass any session policies, then the resulting federated user - // session has no permissions. When you pass session policies, the session + // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. This parameter is optional. + // However, if you do not pass any session policies, then the resulting federated + // user session has no permissions. When you pass session policies, the session // permissions are the intersection of the IAM user policies and the session // policies that you pass. This gives you a way to further restrict the permissions // for a federated user. You cannot use session policies to grant more permissions // than those that are defined in the permissions policy of the IAM user. For more - // information, see Session Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. The resulting credentials can be used to access a // resource that has a resource-based policy. If that policy specifically // references the federated user session in the Principal element of the policy, @@ -191,20 +175,18 @@ type GetFederationTokenInput struct { // An Amazon Web Services conversion compresses the passed inline session policy, // managed policy ARNs, and session tags into a packed binary format that has a // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates by - // percentage how close the policies and tags for your request are to the upper + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper // size limit. PolicyArns []types.PolicyDescriptorType // A list of session tags. Each session tag consists of a key name and an // associated value. For more information about session tags, see Passing Session - // Tags in STS - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the - // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. - // The plaintext session tag keys can’t exceed 128 characters and the values can’t - // exceed 256 characters. 
For these and additional limits, see IAM and STS - // Character Limits - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. This parameter is optional. You can pass up to 50 session + // tags. The plaintext session tag keys can’t exceed 128 characters and the values + // can’t exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. An Amazon Web Services conversion compresses the passed // inline session policy, managed policy ARNs, and session tags into a packed // binary format that has a separate limit. Your request can fail for this limit @@ -215,9 +197,9 @@ type GetFederationTokenInput struct { // you do, session tags override a user tag with the same key. Tag key–value pairs // are not case sensitive, but case is preserved. This means that you cannot have // separate Department and department tag keys. Assume that the role has the - // Department=Marketing tag and you pass the department=engineering session tag. - // Department and department are not saved as separate tags, and the session tag - // passed in the request takes precedence over the role tag. + // Department = Marketing tag and you pass the department = engineering session + // tag. Department and department are not saved as separate tags, and the session + // tag passed in the request takes precedence over the role tag. Tags []types.Tag noSmithyDocumentSerde @@ -235,7 +217,7 @@ type GetFederationTokenOutput struct { Credentials *types.Credentials // Identifiers for the federated user associated with the credentials (such as - // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You can use + // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use // the federated user's ARN in your resource-based policies, such as an Amazon S3 // bucket policy. FederatedUser *types.FederatedUser diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index bfde51689d..1e5bd4e672 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -11,26 +11,23 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a set of temporary credentials for an Amazon Web Services account or IAM -// user. The credentials consist of an access key ID, a secret access key, and a -// security token. Typically, you use GetSessionToken if you want to use MFA to +// Returns a set of temporary credentials for an Amazon Web Services account or +// IAM user. The credentials consist of an access key ID, a secret access key, and +// a security token. Typically, you use GetSessionToken if you want to use MFA to // protect programmatic calls to specific Amazon Web Services API operations like -// Amazon EC2 StopInstances. MFA-enabled IAM users would need to call +// Amazon EC2 StopInstances . MFA-enabled IAM users would need to call // GetSessionToken and submit an MFA code that is associated with their MFA device. 
// Using the temporary security credentials that are returned from the call, IAM // users can then make programmatic calls to API operations that require MFA // authentication. If you do not supply a correct MFA code, then the API returns an // access denied error. For a comparison of GetSessionToken with the other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// operations that produce temporary credentials, see Requesting Temporary +// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. No permissions are required for users to perform this // operation. The purpose of the sts:GetSessionToken operation is to authenticate // the user using MFA. You cannot use policies to control authentication -// operations. For more information, see Permissions for GetSessionToken -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) +// operations. For more information, see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) // in the IAM User Guide. Session Duration The GetSessionToken operation must be // called by using the long-term Amazon Web Services security credentials of the // Amazon Web Services account root user or an IAM user. Credentials that are @@ -41,18 +38,12 @@ import ( // (1 hour), with a default of 1 hour. Permissions The temporary security // credentials created by GetSessionToken can be used to make API calls to any // Amazon Web Services service with the following exceptions: +// - You cannot call any IAM API operations unless MFA authentication +// information is included in the request. +// - You cannot call any STS API except AssumeRole or GetCallerIdentity . // -// * You cannot call -// any IAM API operations unless MFA authentication information is included in the -// request. -// -// * You cannot call any STS API except AssumeRole or -// GetCallerIdentity. -// -// We recommend that you do not call GetSessionToken with -// Amazon Web Services account root user credentials. Instead, follow our best -// practices -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// We recommend that you do not call GetSessionToken with Amazon Web Services +// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) // by creating one or more IAM users, giving them the necessary permissions, and // using IAM users for everyday interaction with Amazon Web Services. The // credentials that are returned by GetSessionToken are based on permissions @@ -62,8 +53,7 @@ import ( // GetSessionToken is called using the credentials of an IAM user, the temporary // credentials have the same permissions as the IAM user. 
For more information // about using GetSessionToken to create temporary credentials, go to Temporary -// Credentials for Users in Untrusted Environments -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) // in the IAM User Guide. func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { if params == nil { @@ -90,25 +80,25 @@ type GetSessionTokenInput struct { // Services account owners defaults to one hour. DurationSeconds *int32 - // The identification number of the MFA device that is associated with the IAM user - // who is making the GetSessionToken call. Specify this value if the IAM user has a - // policy that requires MFA authentication. The value is either the serial number - // for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN) - // for a virtual device (such as arn:aws:iam::123456789012:mfa/user). You can find - // the device for an IAM user by going to the Amazon Web Services Management - // Console and viewing the user's security credentials. The regex used to validate - // this parameter is a string of characters consisting of upper- and lower-case - // alphanumeric characters with no spaces. You can also include underscores or any - // of the following characters: =,.@:/- + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM user + // has a policy that requires MFA authentication. The value is either the serial + // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name + // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You + // can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. The regex used + // to validate this parameter is a string of characters consisting of upper- and + // lower-case alphanumeric characters with no spaces. You can also include + // underscores or any of the following characters: =,.@:/- SerialNumber *string - // The value provided by the MFA device, if MFA is required. If any policy requires - // the IAM user to submit an MFA code, specify this value. If MFA authentication is - // required, the user must provide a code when requesting a set of temporary - // security credentials. A user who fails to provide the code receives an "access - // denied" response when requesting resources that require MFA authentication. The - // format for this parameter, as described by its regex pattern, is a sequence of - // six numeric digits. + // The value provided by the MFA device, if MFA is required. If any policy + // requires the IAM user to submit an MFA code, specify this value. If MFA + // authentication is required, the user must provide a code when requesting a set + // of temporary security credentials. A user who fails to provide the code receives + // an "access denied" response when requesting resources that require MFA + // authentication. The format for this parameter, as described by its regex + // pattern, is a sequence of six numeric digits. 
TokenCode *string noSmithyDocumentSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go index 7cabbb97e9..7f41c0a6db 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -7,6 +7,6 @@ // temporary, limited-privilege credentials for Identity and Access Management // (IAM) users or for users that you authenticate (federated users). This guide // provides descriptions of the STS API. For more information about using this -// service, see Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// service, see Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) +// . package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 2755e647ee..bdd85eb319 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.0" +const goModuleVersion = "1.18.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go index ce9acedcd3..1f99a0209c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -165,6 +165,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-southeast-3", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "aws-global", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go index 05531d3695..eb60f61b16 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -523,9 +523,6 @@ func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Con return next.HandleSerialize(ctx, in) } func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { - if len(v) == 0 { - return nil - } array := value.Array("member") for i := range v { @@ -567,9 +564,6 @@ func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error { } func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error { - if len(v) == 0 { - return nil - } array := value.Array("member") for i := range v { @@ -580,9 +574,6 @@ func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) } func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error { - if len(v) == 0 { - return nil - } array := value.Array("member") for i := range v { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go index 88d3e6c693..097875b279 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -27,7 
+27,7 @@ func (e *ExpiredTokenException) ErrorMessage() string { return *e.Message } func (e *ExpiredTokenException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "ExpiredTokenException" } return *e.ErrorCodeOverride @@ -57,7 +57,7 @@ func (e *IDPCommunicationErrorException) ErrorMessage() string { return *e.Message } func (e *IDPCommunicationErrorException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "IDPCommunicationError" } return *e.ErrorCodeOverride @@ -86,7 +86,7 @@ func (e *IDPRejectedClaimException) ErrorMessage() string { return *e.Message } func (e *IDPRejectedClaimException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "IDPRejectedClaim" } return *e.ErrorCodeOverride @@ -114,7 +114,7 @@ func (e *InvalidAuthorizationMessageException) ErrorMessage() string { return *e.Message } func (e *InvalidAuthorizationMessageException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidAuthorizationMessageException" } return *e.ErrorCodeOverride @@ -144,7 +144,7 @@ func (e *InvalidIdentityTokenException) ErrorMessage() string { return *e.Message } func (e *InvalidIdentityTokenException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidIdentityToken" } return *e.ErrorCodeOverride @@ -171,7 +171,7 @@ func (e *MalformedPolicyDocumentException) ErrorMessage() string { return *e.Message } func (e *MalformedPolicyDocumentException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "MalformedPolicyDocument" } return *e.ErrorCodeOverride @@ -183,12 +183,10 @@ func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { retu // compresses the session policy document, session policy ARNs, and session tags // into a packed binary format that has a separate limit. The error message // indicates by percentage how close the policies and tags are to the upper size -// limit. For more information, see Passing Session Tags in STS -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. You could receive this error even though you meet other defined -// session policy and session tag limits. For more information, see IAM and STS -// Entity Character Limits -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You could receive this error even though you meet other +// defined session policy and session tag limits. For more information, see IAM +// and STS Entity Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. 
type PackedPolicyTooLargeException struct { Message *string @@ -208,18 +206,17 @@ func (e *PackedPolicyTooLargeException) ErrorMessage() string { return *e.Message } func (e *PackedPolicyTooLargeException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "PackedPolicyTooLarge" } return *e.ErrorCodeOverride } func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// STS is not activated in the requested region for the account that is being asked -// to generate credentials. The account administrator must use the IAM console to -// activate STS in that region. For more information, see Activating and -// Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. type RegionDisabledException struct { Message *string @@ -239,7 +236,7 @@ func (e *RegionDisabledException) ErrorMessage() string { return *e.Message } func (e *RegionDisabledException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "RegionDisabledException" } return *e.ErrorCodeOverride diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go index 86e509905b..90d4f62ae9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -13,9 +13,8 @@ type AssumedRoleUser struct { // The ARN of the temporary security credentials that are returned from the // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in - // the IAM User Guide. + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. // // This member is required. Arn *string @@ -62,9 +61,8 @@ type FederatedUser struct { // The ARN that specifies the federated user that is associated with the // credentials. For more information about ARNs and how to use them in policies, - // see IAM Identifiers - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in - // the IAM User Guide. + // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. // // This member is required. Arn *string @@ -84,26 +82,23 @@ type PolicyDescriptorType struct { // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session // policy for the role. For more information about ARNs, see Amazon Resource Names - // (ARNs) and Amazon Web Services Service Namespaces - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. 
+ // (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. Arn *string noSmithyDocumentSerde } -// You can pass custom key-value pair attributes when you assume a role or federate -// a user. These are called session tags. You can then use the session tags to -// control access to resources. For more information, see Tagging Amazon Web -// Services STS Sessions -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. +// You can pass custom key-value pair attributes when you assume a role or +// federate a user. These are called session tags. You can then use the session +// tags to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. type Tag struct { // The key for a session tag. You can pass up to 50 session tags. The plain text // session tag keys can’t exceed 128 characters. For these and additional limits, - // see IAM and STS Character Limits - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // This member is required. @@ -111,8 +106,7 @@ type Tag struct { // The value for a session tag. You can pass up to 50 session tags. The plain text // session tag values can’t exceed 256 characters. For these and additional limits, - // see IAM and STS Character Limits - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // This member is required. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 0da3efe4c2..b071cea51d 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -49,6 +49,7 @@ type options struct { missedPrioritizedFiles *[]string compression Compression ctx context.Context + minChunkSize int } type Option func(o *options) error @@ -63,6 +64,7 @@ func WithChunkSize(chunkSize int) Option { // WithCompressionLevel option specifies the gzip compression level. // The default is gzip.BestCompression. +// This option will be ignored if WithCompression option is used. // See also: https://godoc.org/compress/gzip#pkg-constants func WithCompressionLevel(level int) Option { return func(o *options) error { @@ -113,6 +115,18 @@ func WithContext(ctx context.Context) Option { } } +// WithMinChunkSize option specifies the minimal number of bytes of data +// must be written in one gzip stream. +// By increasing this number, one gzip stream can contain multiple files +// and it hopefully leads to smaller result blob. +// NOTE: This adds a TOC property that old reader doesn't understand. +func WithMinChunkSize(minChunkSize int) Option { + return func(o *options) error { + o.minChunkSize = minChunkSize + return nil + } +} + // Blob is an eStargz blob. 
type Blob struct { io.ReadCloser @@ -180,7 +194,14 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { if err != nil { return nil, err } - tarParts := divideEntries(entries, runtime.GOMAXPROCS(0)) + var tarParts [][]*entry + if opts.minChunkSize > 0 { + // Each entry needs to know the size of the current gzip stream so they + // cannot be processed in parallel. + tarParts = [][]*entry{entries} + } else { + tarParts = divideEntries(entries, runtime.GOMAXPROCS(0)) + } writers := make([]*Writer, len(tarParts)) payloads := make([]*os.File, len(tarParts)) var mu sync.Mutex @@ -195,6 +216,13 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { } sw := NewWriterWithCompressor(esgzFile, opts.compression) sw.ChunkSize = opts.chunkSize + sw.MinChunkSize = opts.minChunkSize + if sw.needsOpenGzEntries == nil { + sw.needsOpenGzEntries = make(map[string]struct{}) + } + for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} { + sw.needsOpenGzEntries[f] = struct{}{} + } if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { return err } @@ -209,7 +237,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { rErr = err return nil, err } - tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...) + tocAndFooter, tocDgst, err := closeWithCombine(writers...) if err != nil { rErr = err return nil, err @@ -252,7 +280,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { // Writers doesn't write TOC and footer to the underlying writers so they can be // combined into a single eStargz and tocAndFooter returned by this function can // be appended at the tail of that combined blob. -func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { +func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { if len(ws) == 0 { return nil, "", fmt.Errorf("at least one writer must be passed") } @@ -395,7 +423,7 @@ func readerFromEntries(entries ...*entry) io.Reader { func importTar(in io.ReaderAt) (*tarFile, error) { tf := &tarFile{} - pw, err := newCountReader(in) + pw, err := newCountReadSeeker(in) if err != nil { return nil, fmt.Errorf("failed to make position watcher: %w", err) } @@ -571,19 +599,19 @@ func (tf *tempFiles) cleanupAll() error { return errorutil.Aggregate(allErr) } -func newCountReader(r io.ReaderAt) (*countReader, error) { +func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) { pos := int64(0) - return &countReader{r: r, cPos: &pos}, nil + return &countReadSeeker{r: r, cPos: &pos}, nil } -type countReader struct { +type countReadSeeker struct { r io.ReaderAt cPos *int64 mu sync.Mutex } -func (cr *countReader) Read(p []byte) (int, error) { +func (cr *countReadSeeker) Read(p []byte) (int, error) { cr.mu.Lock() defer cr.mu.Unlock() @@ -594,7 +622,7 @@ func (cr *countReader) Read(p []byte) (int, error) { return n, err } -func (cr *countReader) Seek(offset int64, whence int) (int64, error) { +func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { cr.mu.Lock() defer cr.mu.Unlock() @@ -615,7 +643,7 @@ func (cr *countReader) Seek(offset int64, whence int) (int64, error) { return offset, nil } -func (cr *countReader) currentPos() int64 { +func (cr *countReadSeeker) currentPos() int64 { cr.mu.Lock() defer cr.mu.Unlock() diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go 
b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index 921e59ec6e..f4d5546558 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -150,10 +150,10 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { allErr = append(allErr, err) continue } - if tocSize <= 0 { + if tocOffset >= 0 && tocSize <= 0 { tocSize = sr.Size() - tocOffset - fSize } - if tocSize < int64(len(maybeTocBytes)) { + if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) { maybeTocBytes = maybeTocBytes[:tocSize] } r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) @@ -207,8 +207,16 @@ func (r *Reader) initFields() error { uname := map[int]string{} gname := map[int]string{} var lastRegEnt *TOCEntry - for _, ent := range r.toc.Entries { + var chunkTopIndex int + for i, ent := range r.toc.Entries { ent.Name = cleanEntryName(ent.Name) + switch ent.Type { + case "reg", "chunk": + if ent.Offset != r.toc.Entries[chunkTopIndex].Offset { + chunkTopIndex = i + } + ent.chunkTopIndex = chunkTopIndex + } if ent.Type == "reg" { lastRegEnt = ent } @@ -294,7 +302,7 @@ func (r *Reader) initFields() error { if e.isDataType() { e.nextOffset = lastOffset } - if e.Offset != 0 { + if e.Offset != 0 && e.InnerOffset == 0 { lastOffset = e.Offset } } @@ -488,6 +496,14 @@ func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { // // Name must be absolute path or one that is relative to root. func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) newFileReader(name string) (*fileReader, error) { name = cleanEntryName(name) ent, ok := r.Lookup(name) if !ok { @@ -505,11 +521,19 @@ func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { Err: errors.New("not a regular file"), } } - fr := &fileReader{ + return &fileReader{ r: r, size: ent.Size, ents: r.getChunks(ent), + }, nil +} + +func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err } + fr.preRead = preRead return io.NewSectionReader(fr, 0, fr.size), nil } @@ -521,9 +545,10 @@ func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { } type fileReader struct { - r *Reader - size int64 - ents []*TOCEntry // 1 or more reg/chunk entries + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries + preRead func(*TOCEntry, io.Reader) error } func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { @@ -578,10 +603,48 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) } defer dr.Close() - if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { - return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) + + if fr.preRead == nil { + if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil { + return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err) + } + return io.ReadFull(dr, p) + } + + var retN int + var retErr error + var found bool + var nr int64 + for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] { + if !e.isDataType() { + continue + } + if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset { + break + } + if in, err := 
io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr { + return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err) + } + nr = e.InnerOffset + if e == ent { + found = true + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err) + } + retN, retErr = io.ReadFull(dr, p) + nr += off + int64(retN) + continue + } + cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)} + if err := fr.preRead(e, cr); err != nil { + return 0, fmt.Errorf("failed to pre read: %w", err) + } + nr += cr.n + } + if !found { + return 0, fmt.Errorf("fileReader.ReadAt: target entry not found") } - return io.ReadFull(dr, p) + return retN, retErr } // A Writer writes stargz files. @@ -599,11 +662,20 @@ type Writer struct { lastGroupname map[int]string compressor Compressor + uncompressedCounter *countWriteFlusher + // ChunkSize optionally controls the maximum number of bytes // of data of a regular file that can be written in one gzip // stream before a new gzip stream is started. // Zero means to use a default, currently 4 MiB. ChunkSize int + + // MinChunkSize optionally controls the minimum number of bytes + // of data must be written in one gzip stream before a new gzip + // NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand. + MinChunkSize int + + needsOpenGzEntries map[string]struct{} } // currentCompressionWriter writes to the current w.gz field, which can @@ -646,6 +718,9 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { if err != nil { return nil, fmt.Errorf("failed to parse footer: %w", err) } + if blobPayloadSize < 0 { + blobPayloadSize = sr.Size() + } return c.Reader(io.LimitReader(sr, blobPayloadSize)) } @@ -672,11 +747,12 @@ func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer { bw := bufio.NewWriter(w) cw := &countWriter{w: bw} return &Writer{ - bw: bw, - cw: cw, - toc: &JTOC{Version: 1}, - diffHash: sha256.New(), - compressor: c, + bw: bw, + cw: cw, + toc: &JTOC{Version: 1}, + diffHash: sha256.New(), + compressor: c, + uncompressedCounter: &countWriteFlusher{}, } } @@ -717,6 +793,20 @@ func (w *Writer) closeGz() error { return nil } +func (w *Writer) flushGz() error { + if w.closed { + return errors.New("flush on closed Writer") + } + if w.gz != nil { + if f, ok := w.gz.(interface { + Flush() error + }); ok { + return f.Flush() + } + } + return nil +} + // nameIfChanged returns name, unless it was the already the value of (*mp)[id], // in which case it returns the empty string. 
func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string { @@ -736,6 +826,9 @@ func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string { func (w *Writer) condOpenGz() (err error) { if w.gz == nil { w.gz, err = w.compressor.Writer(w.cw) + if w.gz != nil { + w.gz = w.uncompressedCounter.register(w.gz) + } } return } @@ -784,6 +877,8 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { if lossless { tr.RawAccounting = true } + prevOffset := w.cw.n + var prevOffsetUncompressed int64 for { h, err := tr.Next() if err == io.EOF { @@ -883,10 +978,6 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { totalSize := ent.Size // save it before we destroy ent tee := io.TeeReader(tr, payloadDigest.Hash()) for written < totalSize { - if err := w.closeGz(); err != nil { - return err - } - chunkSize := int64(w.chunkSize()) remain := totalSize - written if remain < chunkSize { @@ -894,7 +985,23 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { } else { ent.ChunkSize = chunkSize } - ent.Offset = w.cw.n + + // We flush the underlying compression writer here to correctly calculate "w.cw.n". + if err := w.flushGz(); err != nil { + return err + } + if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) { + if err := w.closeGz(); err != nil { + return err + } + ent.Offset = w.cw.n + prevOffset = ent.Offset + prevOffsetUncompressed = w.uncompressedCounter.n + } else { + ent.Offset = prevOffset + ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed + } + ent.ChunkOffset = written chunkDigest := digest.Canonical.Digester() @@ -940,6 +1047,17 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { return err } +func (w *Writer) needsOpenGz(ent *TOCEntry) bool { + if ent.Type != "reg" { + return false + } + if w.needsOpenGzEntries == nil { + return false + } + _, ok := w.needsOpenGzEntries[ent.Name] + return ok +} + // DiffID returns the SHA-256 of the uncompressed tar bytes. // It is only valid to call DiffID after Close. func (w *Writer) DiffID() string { @@ -956,6 +1074,28 @@ func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) { } func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) { + if tocOff < 0 { + // This means that TOC isn't contained in the blob. + // We pass nil reader to ParseTOC and expect that ParseTOC acquire TOC from + // the external location. 
+ start := time.Now() + toc, tocDgst, err := d.ParseTOC(nil) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } if len(tocBytes) > 0 { start := time.Now() toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) @@ -1021,6 +1161,37 @@ func (cw *countWriter) Write(p []byte) (n int, err error) { return } +type countWriteFlusher struct { + io.WriteCloser + n int64 +} + +func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser { + wc.WriteCloser = w + return wc +} + +func (wc *countWriteFlusher) Write(p []byte) (n int, err error) { + n, err = wc.WriteCloser.Write(p) + wc.n += int64(n) + return +} + +func (wc *countWriteFlusher) Flush() error { + if f, ok := wc.WriteCloser.(interface { + Flush() error + }); ok { + return f.Flush() + } + return nil +} + +func (wc *countWriteFlusher) Close() error { + err := wc.WriteCloser.Close() + wc.WriteCloser = nil + return err +} + // isGzip reports whether br is positioned right before an upcoming gzip stream. // It does not consume any bytes from br. func isGzip(br *bufio.Reader) bool { @@ -1039,3 +1210,14 @@ func positive(n int64) int64 { } return n } + +type countReader struct { + r io.Reader + n int64 +} + +func (cr *countReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + cr.n += int64(n) + return +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go index 591d7a62e1..f24afe32f4 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -60,7 +60,7 @@ type GzipCompressor struct { compressionLevel int } -func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { +func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) { return gzip.NewWriterLevel(w, gc.compressionLevel) } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 37448cae08..0ca6fd75f2 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -31,6 +31,7 @@ import ( "errors" "fmt" "io" + "math/rand" "os" "path/filepath" "reflect" @@ -44,21 +45,27 @@ import ( digest "github.com/opencontainers/go-digest" ) +func init() { + rand.Seed(time.Now().UnixNano()) +} + // TestingController is Compression with some helper methods necessary for testing. type TestingController interface { Compression - CountStreams(*testing.T, []byte) int + TestStreams(t *testing.T, b []byte, streams []int64) DiffIDOf(*testing.T, []byte) string String() string } // CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. -func CompressionTestSuite(t *testing.T, controllers ...TestingController) { +func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) { t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) 
}) t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) }) } +type TestingControllerFactory func() TestingController + const ( uncompressedType int = iota gzipType @@ -75,11 +82,12 @@ var allowedPrefix = [4]string{"", "./", "/", "../"} // testBuild tests the resulting stargz blob built by this pkg has the same // contents as the normal stargz blob. -func testBuild(t *testing.T, controllers ...TestingController) { +func testBuild(t *testing.T, controllers ...TestingControllerFactory) { tests := []struct { - name string - chunkSize int - in []tarEntry + name string + chunkSize int + minChunkSize []int + in []tarEntry }{ { name: "regfiles and directories", @@ -108,11 +116,14 @@ func testBuild(t *testing.T, controllers ...TestingController) { ), }, { - name: "various files", - chunkSize: 4, + name: "various files", + chunkSize: 4, + minChunkSize: []int{0, 64000}, in: tarOf( file("baz.txt", "bazbazbazbazbazbazbaz"), - file("foo.txt", "a"), + file("foo1.txt", "a"), + file("bar/foo2.txt", "b"), + file("foo3.txt", "c"), symlink("barlink", "test/bar.txt"), dir("test/"), dir("dev/"), @@ -144,99 +155,112 @@ func testBuild(t *testing.T, controllers ...TestingController) { }, } for _, tt := range tests { + if len(tt.minChunkSize) == 0 { + tt.minChunkSize = []int{0} + } for _, srcCompression := range srcCompressions { srcCompression := srcCompression - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, prefix := range allowedPrefix { prefix := prefix - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) { - tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) - // Test divideEntries() - entries, err := sortEntries(tarBlob, nil, nil) // identical order - if err != nil { - t.Fatalf("failed to parse tar: %v", err) - } - var merged []*entry - for _, part := range divideEntries(entries, 4) { - merged = append(merged, part...) - } - if !reflect.DeepEqual(entries, merged) { - for _, e := range entries { - t.Logf("Original: %v", e.header) + for _, minChunkSize := range tt.minChunkSize { + minChunkSize := minChunkSize + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) { + tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) + // Test divideEntries() + entries, err := sortEntries(tarBlob, nil, nil) // identical order + if err != nil { + t.Fatalf("failed to parse tar: %v", err) } - for _, e := range merged { - t.Logf("Merged: %v", e.header) + var merged []*entry + for _, part := range divideEntries(entries, 4) { + merged = append(merged, part...) 
+ } + if !reflect.DeepEqual(entries, merged) { + for _, e := range entries { + t.Logf("Original: %v", e.header) + } + for _, e := range merged { + t.Logf("Merged: %v", e.header) + } + t.Errorf("divided entries couldn't be merged") + return } - t.Errorf("divided entries couldn't be merged") - return - } - // Prepare sample data - wantBuf := new(bytes.Buffer) - sw := NewWriterWithCompressor(wantBuf, cl) - sw.ChunkSize = tt.chunkSize - if err := sw.AppendTar(tarBlob); err != nil { - t.Fatalf("failed to append tar to want stargz: %v", err) - } - if _, err := sw.Close(); err != nil { - t.Fatalf("failed to prepare want stargz: %v", err) - } - wantData := wantBuf.Bytes() - want, err := Open(io.NewSectionReader( - bytes.NewReader(wantData), 0, int64(len(wantData))), - WithDecompressors(cl), - ) - if err != nil { - t.Fatalf("failed to parse the want stargz: %v", err) - } + // Prepare sample data + cl1 := newCL() + wantBuf := new(bytes.Buffer) + sw := NewWriterWithCompressor(wantBuf, cl1) + sw.MinChunkSize = minChunkSize + sw.ChunkSize = tt.chunkSize + if err := sw.AppendTar(tarBlob); err != nil { + t.Fatalf("failed to append tar to want stargz: %v", err) + } + if _, err := sw.Close(); err != nil { + t.Fatalf("failed to prepare want stargz: %v", err) + } + wantData := wantBuf.Bytes() + want, err := Open(io.NewSectionReader( + bytes.NewReader(wantData), 0, int64(len(wantData))), + WithDecompressors(cl1), + ) + if err != nil { + t.Fatalf("failed to parse the want stargz: %v", err) + } - // Prepare testing data - rc, err := Build(compressBlob(t, tarBlob, srcCompression), - WithChunkSize(tt.chunkSize), WithCompression(cl)) - if err != nil { - t.Fatalf("failed to build stargz: %v", err) - } - defer rc.Close() - gotBuf := new(bytes.Buffer) - if _, err := io.Copy(gotBuf, rc); err != nil { - t.Fatalf("failed to copy built stargz blob: %v", err) - } - gotData := gotBuf.Bytes() - got, err := Open(io.NewSectionReader( - bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), - WithDecompressors(cl), - ) - if err != nil { - t.Fatalf("failed to parse the got stargz: %v", err) - } + // Prepare testing data + var opts []Option + if minChunkSize > 0 { + opts = append(opts, WithMinChunkSize(minChunkSize)) + } + cl2 := newCL() + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...) 
+ if err != nil { + t.Fatalf("failed to build stargz: %v", err) + } + defer rc.Close() + gotBuf := new(bytes.Buffer) + if _, err := io.Copy(gotBuf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + gotData := gotBuf.Bytes() + got, err := Open(io.NewSectionReader( + bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), + WithDecompressors(cl2), + ) + if err != nil { + t.Fatalf("failed to parse the got stargz: %v", err) + } - // Check DiffID is properly calculated - rc.Close() - diffID := rc.DiffID() - wantDiffID := cl.DiffIDOf(t, gotData) - if diffID.String() != wantDiffID { - t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) - } + // Check DiffID is properly calculated + rc.Close() + diffID := rc.DiffID() + wantDiffID := cl2.DiffIDOf(t, gotData) + if diffID.String() != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } - // Compare as stargz - if !isSameVersion(t, cl, wantData, gotData) { - t.Errorf("built stargz hasn't same json") - return - } - if !isSameEntries(t, want, got) { - t.Errorf("built stargz isn't same as the original") - return - } + // Compare as stargz + if !isSameVersion(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz hasn't same json") + return + } + if !isSameEntries(t, want, got) { + t.Errorf("built stargz isn't same as the original") + return + } - // Compare as tar.gz - if !isSameTarGz(t, cl, wantData, gotData) { - t.Errorf("built stargz isn't same tar.gz") - return - } - }) + // Compare as tar.gz + if !isSameTarGz(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz isn't same tar.gz") + return + } + }) + } } } } @@ -244,13 +268,13 @@ func testBuild(t *testing.T, controllers ...TestingController) { } } -func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { - aGz, err := controller.Reader(bytes.NewReader(a)) +func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { + aGz, err := cla.Reader(bytes.NewReader(a)) if err != nil { t.Fatalf("failed to read A") } defer aGz.Close() - bGz, err := controller.Reader(bytes.NewReader(b)) + bGz, err := clb.Reader(bytes.NewReader(b)) if err != nil { t.Fatalf("failed to read B") } @@ -304,12 +328,12 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { return true } -func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool { - aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller) +func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { + aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla) if err != nil { t.Fatalf("failed to parse A: %v", err) } - bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller) + bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb) if err != nil { t.Fatalf("failed to parse B: %v", err) } @@ -463,7 +487,7 @@ func equalEntry(a, b *TOCEntry) bool { a.GID == b.GID && a.Uname == b.Uname && a.Gname == b.Gname && - (a.Offset > 0) == (b.Offset > 0) && + (a.Offset >= 0) == (b.Offset >= 0) && (a.NextOffset() > 0) == (b.NextOffset() > 0) && a.DevMajor == b.DevMajor && a.DevMinor == b.DevMinor && @@ -510,14 +534,15 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { const chunkSize = 3 // type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, 
compressionLevel int) -type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) // testDigestAndVerify runs specified checks against sample stargz blobs. -func testDigestAndVerify(t *testing.T, controllers ...TestingController) { +func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { tests := []struct { - name string - tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) - checks []check + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + minChunkSize []int }{ { name: "no-regfile", @@ -544,6 +569,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { regDigest(t, "test/bar.txt", "bbb", dgstMap), ) }, + minChunkSize: []int{0, 64000}, checks: []check{ checkStargzTOC, checkVerifyTOC, @@ -581,11 +607,14 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { }, }, { - name: "with-non-regfiles", + name: "with-non-regfiles", + minChunkSize: []int{0, 64000}, tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), + regDigest(t, "bar/foo2.txt", "b", dgstMap), + regDigest(t, "foo3.txt", "c", dgstMap), symlink("barlink", "test/bar.txt"), dir("test/"), regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), @@ -599,6 +628,8 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { checkVerifyInvalidStargzFail(buildTar(t, tarOf( file("baz.txt", "bazbazbazbazbazbazbaz"), file("foo.txt", "a"), + file("bar/foo2.txt", "b"), + file("foo3.txt", "c"), symlink("barlink", "test/bar.txt"), dir("test/"), file("test/bar.txt", "testbartestbar"), @@ -612,38 +643,45 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { } for _, tt := range tests { + if len(tt.minChunkSize) == 0 { + tt.minChunkSize = []int{0} + } for _, srcCompression := range srcCompressions { srcCompression := srcCompression - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, prefix := range allowedPrefix { prefix := prefix for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) { - // Get original tar file and chunk digests - dgstMap := make(map[string]digest.Digest) - tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) - - rc, err := Build(compressBlob(t, tarBlob, srcCompression), - WithChunkSize(chunkSize), WithCompression(cl)) - if err != nil { - t.Fatalf("failed to convert stargz: %v", err) - } - tocDigest := rc.TOCDigest() - defer rc.Close() - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, rc); err != nil { - t.Fatalf("failed to copy built stargz blob: %v", err) - } - newStargz := buf.Bytes() - // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. 
- dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) + for _, minChunkSize := range tt.minChunkSize { + minChunkSize := minChunkSize + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) { + // Get original tar file and chunk digests + dgstMap := make(map[string]digest.Digest) + tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) + + cl := newCL() + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to convert stargz: %v", err) + } + tocDigest := rc.TOCDigest() + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + newStargz := buf.Bytes() + // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. + dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) - for _, check := range tt.checks { - check(t, newStargz, tocDigest, dgstMap, cl) - } - }) + for _, check := range tt.checks { + check(t, newStargz, tocDigest, dgstMap, cl, newCL) + } + }) + } } } } @@ -654,7 +692,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { // checkStargzTOC checks the TOC JSON of the passed stargz has the expected // digest and contains valid chunks. It walks all entries in the stargz and // checks all chunk digests stored to the TOC JSON match the actual contents. -func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { +func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -765,7 +803,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyTOC checks the verification works for the TOC JSON of the passed // stargz. It walks all entries in the stargz and checks the verifications for // all chunks work. -func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { +func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -846,7 +884,7 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be // detected during the verification and the verification returns an error. 
func checkVerifyInvalidTOCEntryFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { funcs := map[string]rewriteFunc{ "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { var found bool @@ -920,8 +958,9 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { // checkVerifyInvalidStargzFail checks if the verification detects that the // given stargz file doesn't match to the expected digest and returns error. func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { - rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller)) + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + cl := newController() + rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl)) if err != nil { t.Fatalf("failed to convert stargz: %v", err) } @@ -934,7 +973,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { sgz, err := Open( io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))), - WithDecompressors(controller), + WithDecompressors(cl), ) if err != nil { t.Fatalf("failed to parse converted stargz: %v", err) @@ -951,7 +990,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { // checkVerifyBrokenContentFail checks if the verifier detects broken contents // that doesn't match to the expected digest and returns error. 
func checkVerifyBrokenContentFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { // Parse stargz file sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), @@ -1070,7 +1109,10 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT } // Decode the TOC JSON - tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + var tocReader io.Reader + if tocOffset >= 0 { + tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + } decodedJTOC, _, err = controller.ParseTOC(tocReader) if err != nil { return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) @@ -1078,28 +1120,31 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT return decodedJTOC, tocOffset, nil } -func testWriteAndOpen(t *testing.T, controllers ...TestingController) { +func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { const content = "Some contents" invalidUtf8 := "\xff\xfe\xfd" xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} sampleOwner := owner{uid: 50, gid: 100} + data64KB := randomContents(64000) + tests := []struct { - name string - chunkSize int - in []tarEntry - want []stargzCheck - wantNumGz int // expected number of streams + name string + chunkSize int + minChunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz wantFailOnLossLess bool + wantTOCVersion int // default = 1 }{ { - name: "empty", - in: tarOf(), - wantNumGz: 2, // empty tar + TOC + footer - wantNumGzLossLess: 3, // empty tar + TOC + footer + name: "empty", + in: tarOf(), + wantNumGz: 2, // (empty tar) + TOC + footer want: checks( numTOCEntries(0), ), @@ -1195,7 +1240,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { dir("foo/"), file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), ), - wantNumGz: 9, + wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer want: checks( numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file hasDir("foo/"), @@ -1326,23 +1371,108 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { mustSameEntry("foo/foo1", "foolink"), ), }, + { + name: "several_files_in_chunk", + minChunkSize: 8000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + file("foo22", "ccc"), + dir("bar/"), + file("bar/bar.txt", "aaa"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed "data64KB" is still larger than 8KB + wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3 + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo22", len("ccc")), + hasFileLen("bar/bar.txt", len("aaa")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo22", digestFor("ccc")), + hasFileDigest("bar/bar.txt", digestFor("aaa")), + hasFileDigest("foo3", digestFor(data64KB)), + 
hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo22", 0, "ccc"), + hasFileContentsRange("foo22", 1, "cc"), + hasFileContentsRange("foo22", 2, "c"), + hasFileContentsRange("bar/bar.txt", 0, "aaa"), + hasFileContentsRange("bar/bar.txt", 1, "aa"), + hasFileContentsRange("bar/bar.txt", 2, "a"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, + { + name: "several_files_in_chunk_chunked", + minChunkSize: 8000, + chunkSize: 32000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + dir("bar/"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB + wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks) + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo/foo1", 1, data64KB[1:]), + hasFileContentsRange("foo/foo1", 2, data64KB[2:]), + hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, } for _, tt := range tests { - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, prefix := range allowedPrefix { prefix := prefix for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, lossless := range []bool{true, false} { - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) { var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) origTarDgstr := digest.Canonical.Digester() tr = io.TeeReader(tr, origTarDgstr.Hash()) var stargzBuf bytes.Buffer - w := NewWriterWithCompressor(&stargzBuf, cl) + cl1 := newCL() + w := NewWriterWithCompressor(&stargzBuf, cl1) w.ChunkSize = tt.chunkSize + w.MinChunkSize = tt.minChunkSize if lossless { err := w.AppendTarLossLess(tr) if tt.wantFailOnLossLess { @@ -1366,7 +1496,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { if lossless { // 
Check if the result blob reserves original tar metadata - rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl) + rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1) if err != nil { t.Errorf("failed to decompress blob: %v", err) return @@ -1385,32 +1515,71 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { } diffID := w.DiffID() - wantDiffID := cl.DiffIDOf(t, b) + wantDiffID := cl1.DiffIDOf(t, b) if diffID != wantDiffID { t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) } - got := cl.CountStreams(t, b) - wantNumGz := tt.wantNumGz - if lossless && tt.wantNumGzLossLess > 0 { - wantNumGz = tt.wantNumGzLossLess - } - if got != wantNumGz { - t.Errorf("number of streams = %d; want %d", got, wantNumGz) - } - telemetry, checkCalled := newCalledTelemetry() + sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))) r, err := Open( - io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), - WithDecompressors(cl), + sr, + WithDecompressors(cl1), WithTelemetry(telemetry), ) if err != nil { t.Fatalf("stargz.Open: %v", err) } - if err := checkCalled(); err != nil { + wantTOCVersion := 1 + if tt.wantTOCVersion > 0 { + wantTOCVersion = tt.wantTOCVersion + } + if r.toc.Version != wantTOCVersion { + t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion) + } + + footerSize := cl1.FooterSize() + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + t.Errorf("failed to read footer: %v", err) + } + _, tocOffset, _, err := cl1.ParseFooter(footer) + if err != nil { + t.Errorf("failed to parse footer: %v", err) + } + if err := checkCalled(tocOffset >= 0); err != nil { t.Errorf("telemetry failure: %v", err) } + + wantNumGz := tt.wantNumGz + if lossless && tt.wantNumGzLossLess > 0 { + wantNumGz = tt.wantNumGzLossLess + } + streamOffsets := []int64{0} + prevOffset := int64(-1) + streams := 0 + for _, e := range r.toc.Entries { + if e.Offset > prevOffset { + streamOffsets = append(streamOffsets, e.Offset) + prevOffset = e.Offset + streams++ + } + } + streams++ // TOC + if tocOffset >= 0 { + // toc is in the blob + streamOffsets = append(streamOffsets, tocOffset) + } + streams++ // footer + streamOffsets = append(streamOffsets, footerOffset) + if streams != wantNumGz { + t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz) + } + + t.Logf("testing streams: %+v", streamOffsets) + cl1.TestStreams(t, b, streamOffsets) + for _, want := range tt.want { want.check(t, r) } @@ -1422,7 +1591,12 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { } } -func newCalledTelemetry() (telemetry *Telemetry, check func() error) { +type chunkInfo struct { + name string + data string +} + +func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) { var getFooterLatencyCalled bool var getTocLatencyCalled bool var deserializeTocLatencyCalled bool @@ -1430,13 +1604,15 @@ func newCalledTelemetry() (telemetry *Telemetry, check func() error) { func(time.Time) { getFooterLatencyCalled = true }, func(time.Time) { getTocLatencyCalled = true }, func(time.Time) { deserializeTocLatencyCalled = true }, - }, func() error { + }, func(needsGetTOC bool) error { var allErr []error if !getFooterLatencyCalled { allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) } - if !getTocLatencyCalled { - allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + if 
needsGetTOC { + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } } if !deserializeTocLatencyCalled { allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) @@ -1573,6 +1749,53 @@ func hasFileDigest(file string, digest string) stargzCheck { }) } +func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + extraMap := make(map[string]chunkInfo) + for _, e := range extra { + extraMap[e.name] = e + } + var extraNames []string + for n := range extraMap { + extraNames = append(extraNames, n) + } + f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error { + t.Logf("On %q: got preread of %q", file, e.Name) + ex, ok := extraMap[e.Name] + if !ok { + t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames) + } + got, err := io.ReadAll(cr) + if err != nil { + t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err) + } + if ex.data != string(got) { + t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data)) + } + delete(extraMap, e.Name) + return nil + }) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + if len(extraMap) != 0 { + var exNames []string + for _, ex := range extraMap { + exNames = append(exNames, ex.name) + } + t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames) + } + }) +} + func hasFileContentsRange(file string, offset int, want string) stargzCheck { return stargzCheckFn(func(t *testing.T, r *Reader) { f, err := r.OpenFile(file) @@ -1585,7 +1808,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck { t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) } if string(got) != want { - t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want) + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) } }) } @@ -1797,6 +2020,13 @@ func mustSameEntry(files ...string) stargzCheck { }) } +func viewContent(c []byte) string { + if len(c) < 100 { + return string(c) + } + return string(c[:50]) + "...(omit)..." 
+ string(c[50:100]) +} + func tarOf(s ...tarEntry) []tarEntry { return s } type tarEntry interface { @@ -2056,6 +2286,16 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[strin }) } +var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomContents(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = runes[rand.Intn(len(runes))] + } + return string(b) +} + func fileModeToTarMode(mode os.FileMode) (int64, error) { h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") if err != nil { @@ -2073,3 +2313,54 @@ func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } func (f fileInfoOnlyMode) Sys() interface{} { return nil } + +func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { + if len(streams) == 0 { + return // nop + } + + wants := map[int64]struct{}{} + for _, s := range streams { + wants[s] = struct{}{} + } + + len0 := len(b) + br := bytes.NewReader(b) + zr := new(gzip.Reader) + t.Logf("got gzip streams:") + numStreams := 0 + for { + zoff := len0 - br.Len() + if err := zr.Reset(br); err != nil { + if err == io.EOF { + return + } + t.Fatalf("countStreams(gzip), Reset: %v", err) + } + zr.Multistream(false) + n, err := io.Copy(io.Discard, zr) + if err != nil { + t.Fatalf("countStreams(gzip), Copy: %v", err) + } + var extra string + if len(zr.Header.Extra) > 0 { + extra = fmt.Sprintf("; extra=%q", zr.Header.Extra) + } + t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra) + delete(wants, int64(zoff)) + numStreams++ + } +} + +func GzipDiffIDOf(t *testing.T, b []byte) string { + h := sha256.New() + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("diffIDOf(gzip): %v", err) + } + defer zr.Close() + if _, err := io.Copy(h, zr); err != nil { + t.Fatalf("diffIDOf(gzip).Copy: %v", err) + } + return fmt.Sprintf("sha256:%x", h.Sum(nil)) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go index 3bc74463ec..57e0aa614e 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -149,6 +149,12 @@ type TOCEntry struct { // ChunkSize. Offset int64 `json:"offset,omitempty"` + // InnerOffset is an optional field indicates uncompressed offset + // of this "reg" or "chunk" payload in a stream starts from Offset. + // This field enables to put multiple "reg" or "chunk" payloads + // in one chunk with having the same Offset but different InnerOffset. + InnerOffset int64 `json:"innerOffset,omitempty"` + nextOffset int64 // the Offset of the next entry with a non-zero Offset // DevMajor is the major device number for "char" and "block" types. @@ -186,6 +192,9 @@ type TOCEntry struct { ChunkDigest string `json:"chunkDigest,omitempty"` children map[string]*TOCEntry + + // chunkTopIndex is index of the entry where Offset starts in the blob. + chunkTopIndex int } // ModTime returns the entry's modification time. @@ -279,7 +288,10 @@ type Compressor interface { // Writer returns WriteCloser to be used for writing a chunk to eStargz. // Everytime a chunk is written, the WriteCloser is closed and Writer is // called again for writing the next chunk. 
- Writer(w io.Writer) (io.WriteCloser, error) + // + // The returned writer should implement "Flush() error" function that flushes + // any pending compressed data to the underlying writer. + Writer(w io.Writer) (WriteFlushCloser, error) // WriteTOCAndFooter is called to write JTOC to the passed Writer. // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob @@ -303,8 +315,12 @@ type Decompressor interface { // payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between // the top until the TOC JSON). // - // Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range - // from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize). + // If tocOffset < 0, we assume that TOC isn't contained in the blob and pass nil reader + // to ParseTOC. We expect that ParseTOC acquire TOC from the external location and return it. + // + // tocSize is optional. If tocSize <= 0, it's by default the size of the range from tocOffset until the beginning of the + // footer (blob size - tocOff - FooterSize). + // If blobPayloadSize < 0, blobPayloadSize become the blob size. ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) // ParseTOC parses TOC from the passed reader. The reader provides the partial contents @@ -313,5 +329,14 @@ type Decompressor interface { // This function returns tocDgst that represents the digest of TOC that will be used // to verify this blob. This must match to the value returned from // Compressor.WriteTOCAndFooter that is used when creating this blob. + // + // If tocOffset returned by ParseFooter is < 0, we assume that TOC isn't contained in the blob. + // Pass nil reader to ParseTOC then we expect that ParseTOC acquire TOC from the external location + // and return it. ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) } + +type WriteFlushCloser interface { + io.WriteCloser + Flush() error +} diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS index 8990f85b56..483743c992 100644 --- a/vendor/github.com/docker/cli/AUTHORS +++ b/vendor/github.com/docker/cli/AUTHORS @@ -1,9 +1,10 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `scripts/docs/generate-authors.sh`. +# File @generated by scripts/docs/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See scripts/docs/generate-authors.sh to make modifications. Aanand Prasad Aaron L. Xu -Aaron Lehmann +Aaron Lehmann Aaron.L.Xu Abdur Rehman Abhinandan Prativadi @@ -24,22 +25,27 @@ Akihiro Suda Akim Demaille Alan Thompson Albert Callarisa +Alberto Roura Albin Kerouanton Aleksa Sarai Aleksander Piotrowski Alessandro Boch +Alex Couture-Beil Alex Mavrogiannis Alex Mayer Alexander Boyd Alexander Larsson -Alexander Morozov +Alexander Morozov Alexander Ryabov Alexandre González +Alexey Igrychev +Alexis Couvreur Alfred Landrum Alicia Lauerman Allen Sun Alvin Deng Amen Belayneh +Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com> Amir Goldstein Amit Krishnan Amit Shukla @@ -48,6 +54,8 @@ Anca Iordache Anda Xu Andrea Luzzardi Andreas Köhler +Andres G. 
Aragoneses +Andres Leon Rangel Andrew France Andrew Hsu Andrew Macpherson @@ -67,8 +75,9 @@ Antonis Kalipetis Anusha Ragunathan Ao Li Arash Deshmeh -Arko Dasgupta -Arnaud Porterie +Arko Dasgupta +Arnaud Porterie +Arnaud Rebillout Arthur Peka Ashwini Oruganti Azat Khuyiyakhmetov @@ -76,18 +85,23 @@ Bardia Keyoumarsi Barnaby Gray Bastiaan Bakker BastianHofmann +Ben Bodenmiller Ben Bonnefoy Ben Creasy Ben Firshman Benjamin Boudreau +Benjamin Böhmke +Benjamin Nater Benoit Sigoure Bhumika Bayani Bill Wang Bin Liu Bingshen Wang +Bishal Das Boaz Shuster Bogdan Anton Boris Pruessmann +Brad Baker Bradley Cicenas Brandon Mitchell Brandon Philips @@ -96,6 +110,7 @@ Bret Fisher Brian (bex) Exelbierd Brian Goff Brian Wieder +Bruno Sousa Bryan Bess Bryan Boreham Bryan Murphy @@ -114,15 +129,19 @@ Charles Chan Charles Law Charles Smith Charlie Drage +Charlotte Mach ChaYoung You +Chee Hau Lim Chen Chuanliang Chen Hanxiao Chen Mingjie Chen Qiu +Chris Couzens Chris Gavin Chris Gibson Chris McKinnel Chris Snow +Chris Vermilion Chris Weyl Christian Persson Christian Stefanescu @@ -131,6 +150,7 @@ Christophe Vidal Christopher Biscardi Christopher Crone Christopher Jones +Christopher Svensson Christy Norman Chun Chen Clinton Kitson @@ -139,8 +159,10 @@ Colin Hebert Collin Guarino Colm Hally Comical Derskeal <27731088+derskeal@users.noreply.github.com> +Conner Crosby Corey Farrell Corey Quon +Cory Bennet Craig Wilhite Cristian Staretu Daehyeok Mun @@ -170,11 +192,13 @@ Dattatraya Kumbhar Dave Goodchild Dave Henderson Dave Tucker +David Alvarez David Beitey David Calavera David Cramer David Dooling David Gageot +David Karlsson David Lechner David Scott David Sheets @@ -186,7 +210,8 @@ Denis Defreyne Denis Gladkikh Denis Ollier Dennis Docter -Derek McGowan +Derek McGowan +Des Preston Deshi Xiao Dharmit Shah Dhawal Yogesh Bhanushali @@ -196,12 +221,14 @@ Dimitry Andric Ding Fei Diogo Monica Djordje Lukic +Dmitriy Fishman Dmitry Gusev Dmitry Smirnov Dmitry V. Krivenok Dominik Braun Don Kjer Dong Chen +DongGeon Lee Doug Davis Drew Erny Ed Costello @@ -211,12 +238,14 @@ Eli Uriegas Elias Faxö Elliot Luo <956941328@qq.com> Eric Curtin +Eric Engestrom Eric G. Noriega Eric Rosenberg Eric Sage Eric-Olivier Lamey Erica Windisch Erik Hollensbe +Erik Humphrey Erik St. Martin Essam A. Hassan Ethan Haynes @@ -229,8 +258,10 @@ Evelyn Xu Everett Toews Fabio Falci Fabrizio Soppelsa +Felix Geyer Felix Hupfeld Felix Rabe +fezzik1620 Filip Jareš Flavio Crisciani Florian Klein @@ -242,6 +273,7 @@ Frederic Hemberger Frederick F. Kautz IV Frederik Nordahl Jul Sabroe Frieder Bluemle +Gabriel Gore Gabriel Nicolas Avellaneda Gaetan de Villele Gang Qiao @@ -251,13 +283,18 @@ George MacRorie George Xie Gianluca Borello Gildas Cuisinier +Gio d'Amelio +Gleb Stsenov Goksu Toprak Gou Rao +Govind Rai Grant Reaber Greg Pflaum +Gsealy Guilhem Lettron Guillaume J. 
Charmes Guillaume Le Floch +Guillaume Tardif gwx296173 Günther Jungbluth Hakan Özler @@ -278,6 +315,7 @@ Hugo Gabriel Eyherabide huqun Huu Nguyen Hyzhou Zhy +Iain Samuel McLean Elder Ian Campbell Ian Philpot Ignacio Capurro @@ -287,6 +325,7 @@ Ilya Sotkov Ioan Eugen Stan Isabel Jimenez Ivan Grcic +Ivan Grund Ivan Markin Jacob Atzen Jacob Tomlinson @@ -302,15 +341,18 @@ Jan-Jaap Driessen Jana Radhakrishnan Jared Hocutt Jasmine Hegman +Jason Hall Jason Heiss Jason Plum Jay Kamat +Jean Lecordier Jean Rouge Jean-Christophe Sirot Jean-Pierre Huynh Jeff Lindsay Jeff Nickoloff Jeff Silberman +Jennings Zhang Jeremy Chambers Jeremy Unruh Jeremy Yallop @@ -322,6 +364,7 @@ Jian Zhang Jie Luo Jilles Oldenbeuving Jim Galasyn +Jim Lin Jimmy Leger Jimmy Song jimmyxian @@ -338,6 +381,7 @@ Johannes 'fish' Ziemke John Feminella John Harris John Howard +John Howard John Laswell John Maguire John Mulhausen @@ -347,13 +391,16 @@ John Tims John V. Martinez John Willis Jon Johnson +Jon Zeolla Jonatas Baldin Jonathan Boulle Jonathan Lee Jonathan Lomas Jonathan McCrohan +Jonathan Warriss-Simmons Jonh Wendell Jordan Jennings +Jorge Vallecillo Jose J. Escobar <53836904+jescobar-docker@users.noreply.github.com> Joseph Kern Josh Bodah @@ -383,9 +430,11 @@ Katie McLaughlin Ke Xu Kei Ohmura Keith Hudgins +Kelton Bassingthwaite Ken Cochrane Ken ICHIKAWA Kenfe-Mickaël Laventure +Kevin Alvarez Kevin Burke Kevin Feyrer Kevin Kern @@ -401,6 +450,7 @@ Krasi Georgiev Kris-Mikael Krister Kun Zhang Kunal Kushwaha +Kyle Mitofsky Lachlan Cooper Lai Jiangshan Lars Kellogg-Stedman @@ -410,6 +460,7 @@ Lee Gaines Lei Jitang Lennie Leo Gallucci +Leonid Skorospelov Lewis Daly Li Yi Li Yi @@ -445,6 +496,7 @@ Manjunath A Kumatagi Mansi Nahar mapk0y Marc Bihlmaier +Marc Cornellà Marco Mariani Marco Vedovati Marcus Martins @@ -459,6 +511,7 @@ Mason Fish Mason Malone Mateusz Major Mathieu Champlon +Mathieu Rollet Matt Gucci Matt Robenolt Matteo Orefice @@ -467,11 +520,13 @@ Matthieu Hauglustaine Mauro Porras P Max Shytikov Maxime Petazzoni +Maximillian Fan Xavier Mei ChunTao +Metal <2466052+tedhexaflow@users.noreply.github.com> Micah Zoltu Michael A. Smith Michael Bridgen -Michael Crosby +Michael Crosby Michael Friis Michael Irwin Michael Käufl @@ -487,6 +542,7 @@ Mihai Borobocea Mihuleacc Sergiu Mike Brown Mike Casas +Mike Dalton Mike Danese Mike Dillon Mike Goelzer @@ -503,9 +559,12 @@ Mohini Anne Dsouza Moorthy RS Morgan Bauer Morten Hekkvang +Morten Linderud Moysés Borges +Mozi <29089388+pzhlkj6612@users.noreply.github.com> Mrunal Patel muicoder +Murukesh Mohanan Muthukumar R Máximo Cuadros Mårten Cassel @@ -521,6 +580,7 @@ Nathan LeClaire Nathan McCauley Neil Peterson Nick Adcock +Nick Santos Nico Stapelbroek Nicola Kabar Nicolas Borboën @@ -535,6 +595,8 @@ Noah Treuhaft O.S. 
Tezer Odin Ugedal ohmystack +OKA Naoya +Oliver Pomeroy Olle Jonsson Olli Janatuinen Oscar Wieman @@ -550,9 +612,12 @@ Paul Lietar Paul Mulders Paul Weaver Pavel Pospisil +Paweł Gronowski +Paweł Pokrywka Paweł Szczekutowicz Peeyush Gupta Per Lundberg +Peter Dave Hello Peter Edge Peter Hsu Peter Jaffe @@ -560,11 +625,13 @@ Peter Kehl Peter Nagy Peter Salvatore Peter Waller -Phil Estes +Phil Estes Philip Alexander Etling Philipp Gillé Philipp Schmied +Phong Tran pidster +Pieter E Smit pixelistik Pratik Karki Prayag Verma @@ -574,6 +641,7 @@ Qiang Huang Qinglan Peng qudongfang Raghavendra K T +Rahul Kadyan Rahul Zoldyck Ravi Shekhar Jethani Ray Tsang @@ -582,6 +650,7 @@ Remy Suen Renaud Gaubert Ricardo N Feliciano Rich Moyse +Richard Chen Zheng <58443436+rchenzheng@users.noreply.github.com> Richard Mathie Richard Scothern Rick Wieman @@ -591,6 +660,7 @@ Rob Gulewich Robert Wallis Robin Naundorf Robin Speekenbrink +Roch Feuillade Rodolfo Ortiz Rogelio Canedo Rohan Verma @@ -609,11 +679,13 @@ Sainath Grandhi Sakeven Jiang Sally O'Malley Sam Neirinck +Sam Thibault Samarth Shah Sambuddha Basu Sami Tabet Samuel Cochran Samuel Karp +Sandro Jäckel Santhosh Manohar Sargun Dhillon Saswat Bhattacharya @@ -643,7 +715,8 @@ Slava Semushin Solomon Hykes Song Gao Spencer Brown -squeegels <1674195+squeegels@users.noreply.github.com> +Spring Lee +squeegels Srini Brahmaroutu Stefan S. Stefan Scherer @@ -654,6 +727,7 @@ Stephen Rust Steve Durrheimer Steve Richards Steven Burgess +Stoica-Marcu Floris-Andrei Subhajit Ghosh Sun Jianbo Sune Keller @@ -665,7 +739,10 @@ Sébastien HOUZÉ T K Sourabh TAGOMORI Satoshi taiji-tech +Takeshi Koenuma +Takuya Noguchi Taylor Jones +Teiva Harsanyi Tejaswini Duggaraju Tengfei Wang Teppei Fukuda @@ -696,6 +773,7 @@ Tom Fotherby Tom Klingenberg Tom Milligan Tom X. 
Tobin +Tomas Bäckman Tomas Tomecek Tomasz Kopczynski Tomáš Hrčka @@ -711,6 +789,7 @@ Ulrich Bareth Ulysses Souza Umesh Yadav Valentin Lorentz +Vardan Pogosian Venkateswara Reddy Bukkasamudram Veres Lajos Victor Vieux @@ -757,6 +836,7 @@ Yunxiang Huang Zachary Romero Zander Mackie zebrilee +Zeel B Patel Zhang Kun Zhang Wei Zhang Wentao @@ -768,4 +848,5 @@ Zhu Guihua Álex González Álvaro Lázaro Átila Camurça Alves +Александр Менщиков <__Singleton__@hackerdom.ru> 徐俊杰 diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go index 31ad117d41..b7c05c3f86 100644 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -19,7 +19,7 @@ const ( // ConfigFileName is the name of config file ConfigFileName = "config.json" configFileDir = ".docker" - oldConfigfile = ".dockercfg" + oldConfigfile = ".dockercfg" // Deprecated: remove once we stop printing deprecation warning contextsDir = "contexts" ) @@ -84,16 +84,6 @@ func Path(p ...string) (string, error) { return path, nil } -// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from -// a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LegacyLoadFromReader(configData) - return &configFile, err -} - // LoadFromReader is a convenience function that creates a ConfigFile object from // a reader func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { @@ -140,12 +130,8 @@ func load(configDir string) (*configfile.ConfigFile, bool, error) { // Can't find latest config file so check for the old one filename = filepath.Join(getHomeDir(), oldConfigfile) - if file, err := os.Open(filename); err == nil { + if _, err := os.Stat(filename); err == nil { printLegacyFileWarning = true - defer file.Close() - if err := configFile.LegacyLoadFromReader(file); err != nil { - return configFile, printLegacyFileWarning, errors.Wrap(err, filename) - } } return configFile, printLegacyFileWarning, nil } @@ -158,7 +144,7 @@ func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) } if printLegacyFileWarning { - _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release") + _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format has been removed and the configuration file will be ignored") } if !configFile.ContainsAuth() { configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index d6f710817a..609a88c278 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -3,9 +3,7 @@ package configfile import ( "encoding/base64" "encoding/json" - "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -16,13 +14,6 @@ import ( "github.com/sirupsen/logrus" ) -const ( - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
- defaultIndexServer = "https://index.docker.io/v1/" -) - // ConfigFile ~/.docker/config.json file info type ConfigFile struct { AuthConfigs map[string]types.AuthConfig `json:"auths"` @@ -46,8 +37,7 @@ type ConfigFile struct { PruneFilters []string `json:"pruneFilters,omitempty"` Proxies map[string]ProxyConfig `json:"proxies,omitempty"` Experimental string `json:"experimental,omitempty"` - StackOrchestrator string `json:"stackOrchestrator,omitempty"` - Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` + StackOrchestrator string `json:"stackOrchestrator,omitempty"` // Deprecated: swarm is now the default orchestrator, and this option is ignored. CurrentContext string `json:"currentContext,omitempty"` CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` Plugins map[string]map[string]string `json:"plugins,omitempty"` @@ -60,11 +50,7 @@ type ProxyConfig struct { HTTPSProxy string `json:"httpsProxy,omitempty"` NoProxy string `json:"noProxy,omitempty"` FTPProxy string `json:"ftpProxy,omitempty"` -} - -// KubernetesConfig contains Kubernetes orchestrator settings -type KubernetesConfig struct { - AllNamespaces string `json:"allNamespaces,omitempty"` + AllProxy string `json:"allProxy,omitempty"` } // New initializes an empty configuration file for the given filename 'fn' @@ -78,44 +64,6 @@ func New(fn string) *ConfigFile { } } -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return errors.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return errors.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - authConfig.ServerAddress = defaultIndexServer - configFile.AuthConfigs[defaultIndexServer] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return nil -} - // LoadFromReader reads the configuration data given and sets up the auth config // information with given directory and populates the receiver object func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { @@ -134,7 +82,7 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { ac.ServerAddress = addr configFile.AuthConfigs[addr] = ac } - return checkKubernetesConfiguration(configFile.Kubernetes) + return nil } // ContainsAuth returns whether there is authentication configured @@ -191,10 +139,10 @@ func (configFile *ConfigFile) Save() (retErr error) { } dir := filepath.Dir(configFile.Filename) - if err := os.MkdirAll(dir, 0700); err != nil { + if err := os.MkdirAll(dir, 0o700); err != nil { return err } - temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename)) + temp, err := os.CreateTemp(dir, filepath.Base(configFile.Filename)) if err != nil { return err } @@ -244,6 +192,7 @@ func (configFile *ConfigFile) ParseProxyConfig(host 
string, runOpts map[string]* "HTTPS_PROXY": &config.HTTPSProxy, "NO_PROXY": &config.NoProxy, "FTP_PROXY": &config.FTPProxy, + "ALL_PROXY": &config.AllProxy, } m := runOpts if m == nil { @@ -292,12 +241,11 @@ func decodeAuth(authStr string) (string, string, error) { if n > decLen { return "", "", errors.Errorf("Something went wrong decoding auth config") } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { + userName, password, ok := strings.Cut(string(decoded), ":") + if !ok || userName == "" { return "", "", errors.Errorf("Invalid auth configuration file") } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil + return userName, strings.Trim(password, "\x00"), nil } // GetCredentialsStore returns a new credentials store from the settings in the @@ -352,7 +300,8 @@ func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, for registryHostname := range configFile.CredentialHelpers { newAuth, err := configFile.GetAuthConfig(registryHostname) if err != nil { - return nil, err + logrus.WithError(err).Warnf("Failed to get credentials for registry: %s", registryHostname) + continue } auths[registryHostname] = newAuth } @@ -399,17 +348,3 @@ func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) delete(configFile.Plugins, pluginname) } } - -func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { - if kubeConfig == nil { - return nil - } - switch kubeConfig.AllNamespaces { - case "": - case "enabled": - case "disabled": - default: - return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) - } - return nil -} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go index 6af6718126..353887547c 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go @@ -12,7 +12,7 @@ import ( // ignoring any error during the process. func copyFilePermissions(src, dst string) { var ( - mode os.FileMode = 0600 + mode os.FileMode = 0o600 uid, gid int ) diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go index e509820b73..de1c676e50 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -75,7 +75,6 @@ func ConvertToHostname(url string) string { stripped = strings.TrimPrefix(url, "https://") } - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] + hostName, _, _ := strings.Cut(stripped, "/") + return hostName } diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index dffacff112..0728bfe18f 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -1,5 +1,6 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. +# File @generated by hack/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See hack/generate-authors.sh to make modifications. Aanand Prasad Aaron Davidson @@ -7,16 +8,17 @@ Aaron Feng Aaron Hnatiw Aaron Huslage Aaron L. 
Xu -Aaron Lehmann +Aaron Lehmann Aaron Welch -Aaron.L.Xu Abel Muiño Abhijeet Kasurde -Abhinandan Prativadi +Abhinandan Prativadi Abhinav Ajgaonkar Abhishek Chanda Abhishek Sharma Abin Shahab +Abirdcfly +Ada Mancini Adam Avilla Adam Dobrawy Adam Eijdenberg @@ -26,6 +28,7 @@ Adam Mills Adam Pointer Adam Singer Adam Walz +Adam Williams Addam Hardy Aditi Rajagopal Aditya @@ -51,6 +54,7 @@ Akihiro Suda Akim Demaille Akira Koyasu Akshay Karle +Akshay Moghe Al Tobey alambike Alan Hoyle @@ -58,9 +62,11 @@ Alan Scherger Alan Thompson Albert Callarisa Albert Zhang -Albin Kerouanton +Albin Kerouanton +Alec Benson Alejandro González Hevia Aleksa Sarai +Aleksandr Chebotov Aleksandrs Fadins Alena Prokharchyk Alessandro Boch @@ -72,6 +78,7 @@ Alex Crawford Alex Ellis Alex Gaynor Alex Goodman +Alex Nordlund Alex Olshansky Alex Samorukov Alex Warhawk @@ -79,7 +86,8 @@ Alexander Artemenko Alexander Boyd Alexander Larsson Alexander Midlash -Alexander Morozov +Alexander Morozov +Alexander Polakov Alexander Shopov Alexandre Beslic Alexandre Garnier @@ -90,7 +98,8 @@ Alexei Margasov Alexey Guskov Alexey Kotlyarov Alexey Shamrin -Alexis THOMAS +Alexis Ries +Alexis Thomas Alfred Landrum Ali Dehghani Alicia Lauerman @@ -103,6 +112,7 @@ Alvin Deng Alvin Richards amangoel Amen Belayneh +Ameya Gawde Amir Goldstein Amit Bakshi Amit Krishnan @@ -126,6 +136,7 @@ Andreas Köhler Andreas Savvides Andreas Tiefenthaler Andrei Gherzan +Andrei Ushakov Andrei Vagin Andrew C. Bodine Andrew Clay Shafer @@ -135,6 +146,7 @@ Andrew Gerrand Andrew Guenther Andrew He Andrew Hsu +Andrew Kim Andrew Kuklewicz Andrew Macgregor Andrew Macpherson @@ -150,15 +162,17 @@ Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins -andy Andy Chambers andy diller Andy Goldstein Andy Kipp +Andy Lindeman Andy Rothfusz Andy Smith Andy Wilson +Andy Zhang Anes Hasicic +Angel Velazquez Anil Belur Anil Madhavapeddy Ankit Jain @@ -179,20 +193,24 @@ Antonio Murdaca Antonis Kalipetis Antony Messerli Anuj Bahuguna +Anuj Varma Anusha Ragunathan +Anyu Wang apocas Arash Deshmeh ArikaChen -Arko Dasgupta +Arko Dasgupta Arnaud Lefebvre -Arnaud Porterie +Arnaud Porterie Arnaud Rebillout +Artem Khramov Arthur Barr Arthur Gautier Artur Meyster Arun Gupta Asad Saeeduddin Asbjørn Enge +Austin Vazquez averagehuman Avi Das Avi Kivity @@ -200,17 +218,21 @@ Avi Miller Avi Vaid ayoshitake Azat Khuyiyakhmetov +Bao Yonglei Bardia Keyoumarsi Barnaby Gray Barry Allard Bartłomiej Piotrowski Bastiaan Bakker +Bastien Pascard bdevloed +Bearice Ren Ben Bonnefoy Ben Firshman Ben Golub Ben Gould Ben Hall +Ben Langfeld Ben Sargent Ben Severson Ben Toews @@ -218,6 +240,7 @@ Ben Wiklund Benjamin Atkin Benjamin Baker Benjamin Boudreau +Benjamin Böhmke Benjamin Yolken Benny Ng Benoit Chesneau @@ -231,12 +254,15 @@ Bhiraj Butala Bhumika Bayani Bilal Amarni Bill Wang +Billy Ridgway Bily Zhang Bin Liu Bingshen Wang +Bjorn Neergaard Blake Geno Boaz Shuster bobby abbott +Bojun Zhu Boqin Qin Boris Pruessmann Boshi Lian @@ -252,6 +278,7 @@ Brendan Dixon Brent Salisbury Brett Higgins Brett Kochendorfer +Brett Milford Brett Randall Brian (bex) Exelbierd Brian Bland @@ -282,6 +309,7 @@ Byung Kang Caleb Spare Calen Pennington Cameron Boehmer +Cameron Sparr Cameron Spear Campbell Allen Candid Dauth @@ -316,6 +344,7 @@ Charlie Drage Charlie Lewis Chase Bolt ChaYoung You +Chee Hau Lim Chen Chao Chen Chuanliang Chen Hanxiao @@ -325,6 +354,7 @@ Chen Qiu Cheng-mean Liu Chengfei Shang Chengguang Xu +Chenyang Yan chenyuzhu Chetan Birajdar Chewey @@ -339,6 +369,7 @@ Chris Fordham Chris Gavin Chris 
Gibson Chris Khoo +Chris Kreussling (Flatbush Gardener) Chris McKinnel Chris McKinnel Chris Price @@ -351,6 +382,7 @@ Chris Telfer Chris Wahl Chris Weyl Chris White +Christian Becker Christian Berendt Christian Brauner Christian Böhme @@ -359,6 +391,7 @@ Christian Persson Christian Rotzoll Christian Simon Christian Stefanescu +Christoph Ziebuhr Christophe Mehay Christophe Troestler Christophe Vidal @@ -372,7 +405,9 @@ Christy Norman Chun Chen Ciro S. Costa Clayton Coleman +Clint Armstrong Clinton Kitson +clubby789 Cody Roseborough Coenraad Loubser Colin Dunklau @@ -383,19 +418,23 @@ Colin Walters Collin Guarino Colm Hally companycy +Conor Evans Corbin Coleman Corey Farrell Cory Forsyth +Cory Snider cressie176 -CrimsonGlory Cristian Ariza Cristian Staretu cristiano balducci Cristina Yenyxe Gonzalez Garcia Cruceru Calin-Cristian CUI Wei +cuishuang +Cuong Manh Le Cyprian Gracz Cyril F +Da McGrady Daan van Berkel Daehyeok Mun Dafydd Crosby @@ -413,6 +452,7 @@ Dan Hirsch Dan Keder Dan Levy Dan McPherson +Dan Plamadeala Dan Stine Dan Williams Dani Hodovic @@ -433,6 +473,7 @@ Daniel Mizyrycki Daniel Nephin Daniel Norberg Daniel Nordberg +Daniel P. Berrangé Daniel Robinson Daniel S Daniel Sweet @@ -441,6 +482,7 @@ Daniel Watkins Daniel X Moore Daniel YC Lin Daniel Zhang +Daniele Rondina Danny Berger Danny Milosavljevic Danny Yates @@ -456,6 +498,7 @@ Dave Henderson Dave MacDonald Dave Tucker David Anderson +David Bellotti David Calavera David Chung David Corking @@ -470,9 +513,11 @@ David Lawrence David Lechner David M. Karr David Mackey +David Manouchehri David Mat David Mcanulty David McKay +David O'Rourke David P Hilton David Pelaez David R. Jenni @@ -503,14 +548,14 @@ Dennis Docter Derek Derek Derek Ch -Derek McGowan +Derek McGowan Deric Crago Deshi Xiao -devmeyster Devon Estes Devvyn Murphy Dharmit Shah Dhawal Yogesh Bhanushali +Dhilip Kumars Diego Romero Diego Siqueira Dieter Reuter @@ -522,9 +567,11 @@ Dimitris Rozakis Dimitry Andric Dinesh Subhraveti Ding Fei +dingwei Diogo Monica DiuDiugirl Djibril Koné +Djordje Lukic dkumor Dmitri Logvinenko Dmitri Shuralyov @@ -536,6 +583,8 @@ Dmitry Shyshkin Dmitry Smirnov Dmitry V. Krivenok Dmitry Vorobev +Dmytro Iakovliev +docker-unir[bot] Dolph Mathews Dominic Tubach Dominic Yin @@ -569,8 +618,9 @@ Eivind Uggedal Elan Ruusamäe Elango Sivanandam Elena Morozova -Eli Uriegas +Eli Uriegas Elias Faxö +Elias Koromilas Elias Probst Elijah Zupancic eluck @@ -580,6 +630,7 @@ Emil Hernvall Emily Maier Emily Rose Emir Ozer +Eng Zer Jun Enguerran Eohyung Lee epeterso @@ -588,6 +639,7 @@ Eric Curtin Eric G. Noriega Eric Hanchrow Eric Lee +Eric Mountain Eric Myhre Eric Paris Eric Rafaloff @@ -597,17 +649,21 @@ Eric Soderstrom Eric Yang Eric-Olivier Lamey Erica Windisch +Erich Cordoba Erik Bray Erik Dubbelboer Erik Hollensbe Erik Inge Bolsø Erik Kristensen +Erik Sipsma Erik St. Martin Erik Weathers Erno Hopearuoho Erwin van der Koogh +Espen Suenson Ethan Bell Ethan Mosbaugh +Euan Harris Euan Kemp Eugen Krizo Eugene Yakubovich @@ -657,6 +713,7 @@ Fengtu Wang Ferenc Szabo Fernando Fero Volar +Feroz Salam Ferran Rodenas Filipe Brandenburger Filipe Oliveira @@ -673,6 +730,7 @@ Florin Patan fonglh Foysal Iqbal Francesc Campoy +Francesco Degrassi Francesco Mari Francis Chuang Francisco Carriedo @@ -681,18 +739,23 @@ Frank Groeneveld Frank Herrmann Frank Macreery Frank Rosquin -frankyang +Frank Yang Fred Lifton Frederick F. Kautz IV +Frederico F. 
de Oliveira Frederik Loeffert Frederik Nordahl Jul Sabroe Freek Kalter Frieder Bluemle +frobnicaty <92033765+frobnicaty@users.noreply.github.com> +Frédéric Dalleau Fu JinLin Félix Baylac-Jacqué Félix Cantournet Gabe Rosenhouse Gabor Nagy +Gabriel Goller +Gabriel L. Somlo Gabriel Linder Gabriel Monroy Gabriel Nicolas Avellaneda @@ -707,12 +770,14 @@ Gaurav Singh Gaël PORTAY Genki Takiuchi GennadySpb +Geoff Levand Geoffrey Bachelet Geon Kim George Kontridze George MacRorie George Xie Georgi Hristozov +Georgy Yakovlev Gereon Frey German DZ Gert van Valkenhoef @@ -724,6 +789,7 @@ Gildas Cuisinier Giovan Isa Musthofa gissehel Giuseppe Mazzotta +Giuseppe Scrivano Gleb Fotengauer-Malinovskiy Gleb M Borisov Glyn Normington @@ -746,6 +812,8 @@ Guilhem Lettron Guilherme Salgado Guillaume Dufour Guillaume J. Charmes +Gunadhya S. <6939749+gunadhya@users.noreply.github.com> +Guoqiang QI guoxiuyan Guri Gurjeet Singh @@ -755,12 +823,13 @@ gwx296173 Günter Zöchbauer Haichao Yang haikuoliu +haining.cao Hakan Özler Hamish Hutchings Hannes Ljungberg Hans Kristian Flaatten Hans Rødtang -Hao Shu Wei +Hao Shu Wei Hao Zhang <21521210@zju.edu.cn> Harald Albers Harald Niesche @@ -792,15 +861,16 @@ Hu Tao HuanHuan Ye Huanzhong Zhang Huayi Zhang +Hugo Barrera Hugo Duncan Hugo Marisco <0x6875676f@gmail.com> +Hui Kang Hunter Blanks huqun Huu Nguyen -hyeongkyu.lee +Hyeongkyu Lee Hyzhou Zhy Iago López Galeiras -Ian Babrou Ian Bishop Ian Bull Ian Calvert @@ -817,6 +887,7 @@ Igor Dolzhikov Igor Karpovich Iliana Weller Ilkka Laukkanen +Illo Abdulrahim Ilya Dmitrichenko Ilya Gusev Ilya Khlopotov @@ -847,7 +918,8 @@ Jaivish Kothari Jake Champlin Jake Moshenko Jake Sanders -jakedt +Jakub Drahos +Jakub Guzik James Allen James Carey James Carr @@ -859,11 +931,14 @@ James Lal James Mills James Nesbitt James Nugent +James Sanders James Turnbull James Watkins-Harvey Jamie Hannaford Jamshid Afshar +Jan Breig Jan Chren +Jan Götte Jan Keromnes Jan Koprowski Jan Pazdziora @@ -876,7 +951,6 @@ Januar Wayong Jared Biel Jared Hocutt Jaroslaw Zabiello -jaseg Jasmine Hegman Jason A. Donenfeld Jason Divock @@ -891,10 +965,11 @@ Jason Shepherd Jason Smith Jason Sommer Jason Stangroome +Javier Bassi jaxgeller -Jay Jay Jay Kamat +Jay Lim Jean Rouge Jean-Baptiste Barth Jean-Baptiste Dalido @@ -912,12 +987,14 @@ Jeff Minard Jeff Nickoloff Jeff Silberman Jeff Welch +Jeff Zvier Jeffrey Bolle Jeffrey Morgan Jeffrey van Gogh Jenny Gebske Jeremy Chambers Jeremy Grosser +Jeremy Huntwork Jeremy Price Jeremy Qian Jeremy Unruh @@ -933,13 +1010,16 @@ Ji.Zhilong Jian Liao Jian Zhang Jiang Jinyang +Jianyong Wu Jie Luo Jie Ma Jihyun Hwang Jilles Oldenbeuving Jim Alateras +Jim Carroll Jim Ehrismann Jim Galasyn +Jim Lin Jim Minter Jim Perrin Jimmy Cuadra @@ -951,6 +1031,7 @@ Jiri Appl Jiri Popelka Jiuyue Ma Jiří Župka +Joakim Roubert Joao Fernandes Joao Trindade Joe Beda @@ -1012,6 +1093,7 @@ Joost Cassee Jordan Arentsen Jordan Jennings Jordan Sissel +Jordi Massaguer Pla Jorge Marin Jorit Kleine-Möllhoff Jose Diaz-Gonzalez @@ -1044,12 +1126,15 @@ Julien Pervillé Julien Pivotto Julio Guerra Julio Montes +Jun Du Jun-Ru Chang +junxu Jussi Nummelin Justas Brazauskas Justen Martin Justin Cormack Justin Force +Justin Keller <85903732+jk-vb@users.noreply.github.com> Justin Menga Justin Plock Justin Simonelis @@ -1062,6 +1147,7 @@ Jörg Thalheim K. 
Heller Kai Blin Kai Qiang Wu (Kennan) +Kaijie Chen Kamil Domański Kamjar Gerami Kanstantsin Shautsou @@ -1082,6 +1168,7 @@ Kawsar Saiyeed Kay Yan kayrus Kazuhiro Sera +Kazuyoshi Kato Ke Li Ke Xu Kei Ohmura @@ -1096,6 +1183,7 @@ Kenjiro Nakayama Kent Johnson Kenta Tada Kevin "qwazerty" Houdebert +Kevin Alvarez Kevin Burke Kevin Clark Kevin Feyrer @@ -1122,20 +1210,22 @@ knappe Kohei Tsuruta Koichi Shiraishi Konrad Kleine +Konrad Ponichtera Konstantin Gribov Konstantin L Konstantin Pelykh +Kostadin Plachkov Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister Kristian Haugene Kristina Zabunova Krystian Wojcicki -Kun Zhang Kunal Kushwaha Kunal Tyagi Kyle Conroy Kyle Linden +Kyle Squizzato Kyle Wuolle kyu Lachlan Coote @@ -1151,20 +1241,25 @@ Lars R. Damerow Lars-Magnus Skog Laszlo Meszaros Laura Frank +Laurent Bernaille Laurent Erignoux Laurie Voss Leandro Siqueira +Lee Calcote Lee Chao <932819864@qq.com> Lee, Meng-Han -leeplay Lei Gong Lei Jitang +Leiiwang Len Weincier Lennie Leo Gallucci +Leonardo Nodari +Leonardo Taccari Leszek Kowalski Levi Blackstone Levi Gross +Levi Harrison Lewis Daly Lewis Marshall Lewis Peckover @@ -1173,11 +1268,12 @@ Liam Macgillavry Liana Lo Liang Mingqiang Liang-Chi Hsieh +liangwei Liao Qingwei Lifubang Lihua Tang Lily Guo -limsy +limeidan Lin Lu LingFaKe Linus Heckemann @@ -1207,6 +1303,7 @@ Lucas Chi Lucas Molas Lucas Silvestre Luciano Mores +Luis Henrique Mulinari Luis Martínez de Bartolomé Izquierdo Luiz Svoboda Lukas Heeren @@ -1222,7 +1319,7 @@ Ma Shimiao Mabin Madhan Raj Mookkandy Madhav Puri -Madhu Venugopal +Madhu Venugopal Mageee Mahesh Tiyyagura malnick @@ -1255,12 +1352,14 @@ Marius Gundersen Marius Sturm Marius Voila Mark Allen +Mark Feit Mark Jeromin Mark McGranaghan Mark McKinstry Mark Milstein Mark Oates Mark Parker +Mark Vainomaa Mark West Markan Patel Marko Mikulicic @@ -1269,11 +1368,14 @@ Markus Fix Markus Kortlang Martijn Dwars Martijn van Oosterhout +Martin Braun +Martin Dojcak Martin Honermeyer Martin Kelly Martin Mosegaard Amdisen Martin Muzatko Martin Redmond +Maru Newby Mary Anthony Masahito Zembutsu Masato Ohba @@ -1284,13 +1386,16 @@ Mathias Monnerville Mathieu Champlon Mathieu Le Marec - Pasquet Mathieu Parent +Mathieu Paturel Matt Apperson Matt Bachmann +Matt Bajor Matt Bentley Matt Haggard Matt Hoyle Matt McCormick Matt Moore +Matt Morrison <3maven@gmail.com> Matt Richardson Matt Rickard Matt Robenolt @@ -1305,12 +1410,14 @@ Matthew Riley Matthias Klumpp Matthias Kühnle Matthias Rampke +Matthieu Fronton Matthieu Hauglustaine Mattias Jernberg Mauricio Garavaglia mauriyouth Max Harmathy Max Shytikov +Max Timchenko Maxim Fedchyshyn Maxim Ivanov Maxim Kulkin @@ -1324,14 +1431,16 @@ Megan Kostick Mehul Kar Mei ChunTao Mengdi Gao +Menghui Chen Mert Yazıcıoğlu mgniu Micah Zoltu Michael A. 
Smith +Michael Beskin Michael Bridgen Michael Brown Michael Chiang -Michael Crosby +Michael Crosby Michael Currie Michael Friis Michael Gorsuch @@ -1340,6 +1449,7 @@ Michael Holzheu Michael Hudson-Doyle Michael Huettermann Michael Irwin +Michael Kuehn Michael Käufl Michael Neale Michael Nussbaum @@ -1349,23 +1459,29 @@ Michael Spetsiotis Michael Stapelberg Michael Steinert Michael Thies +Michael Weidmann Michael West Michael Zhao Michal Fojtik Michal Gebauer Michal Jemala +Michal Kostrzewa Michal Minář +Michal Rostecki Michal Wieczorek Michaël Pailloncy Michał Czeraszkiewicz Michał Gryko +Michał Kosek Michiel de Jong Mickaël Fortunato Mickaël Remars Miguel Angel Fernández Miguel Morales +Miguel Perez Mihai Borobocea Mihuleacc Sergiu +Mikael Davranche Mike Brown Mike Bush Mike Casas @@ -1384,6 +1500,7 @@ Mike Snitzer mikelinjie <294893458@qq.com> Mikhail Sobolev Miklos Szegedi +Milas Bowman Milind Chawre Miloslav Trmač mingqing @@ -1392,7 +1509,7 @@ Misty Stanley-Jones Mitch Capper Mizuki Urushida mlarcher -Mohammad Banikazemi +Mohammad Banikazemi Mohammad Nasirifar Mohammed Aaqib Ansari Mohit Soni @@ -1406,6 +1523,7 @@ Moysés Borges mrfly Mrunal Patel Muayyad Alsadi +Muhammad Zohaib Aslam Mustafa Akın Muthukumar R Máximo Cuadros @@ -1422,6 +1540,8 @@ Natasha Jarus Nate Brennand Nate Eagleson Nate Jones +Nathan Carlson +Nathan Herald Nathan Hsieh Nathan Kleyn Nathan LeClaire @@ -1445,6 +1565,7 @@ Nick Payne Nick Russo Nick Stenning Nick Stinemates +Nick Wood NickrenREN Nicola Kabar Nicolas Borboën @@ -1455,6 +1576,7 @@ Nicolas Kaiser Nicolas Sterchele Nicolas V Castet Nicolás Hock Isaza +Niel Drummond Nigel Poulton Nik Nyby Nikhil Chawla @@ -1472,6 +1594,7 @@ noducks Nolan Darilek Noriki Nakamura nponeccop +Nurahmadie Nuutti Kotivuori nzwsch O.S. Tezer @@ -1489,7 +1612,9 @@ Olle Jonsson Olli Janatuinen Olly Pomeroy Omri Shiv +Onur Filiz Oriol Francès +Oscar Bonilla <6f6231@gmail.com> Oskar Niburski Otto Kekäläinen Ouyang Liduo @@ -1502,10 +1627,12 @@ Pascal Borreli Pascal Hartig Patrick Böänziger Patrick Devine +Patrick Haas Patrick Hemmer Patrick Stapleton Patrik Cyvoct pattichen +Paul "TBBle" Hampson Paul paul Paul Annesley @@ -1520,6 +1647,7 @@ Paul Liljenberg Paul Morie Paul Nasrat Paul Weaver +Paulo Gomes Paulo Ribeiro Pavel Lobashov Pavel Matěja @@ -1530,6 +1658,7 @@ Pavel Tikhomirov Pavlos Ratis Pavol Vargovcik Pawel Konczalski +Paweł Gronowski Peeyush Gupta Peggy Li Pei Su @@ -1537,6 +1666,7 @@ Peng Tao Penghan Wang Per Weijnitz perhapszzy@sina.com +Pete Woods Peter Bourgon Peter Braden Peter Bücker @@ -1552,8 +1682,10 @@ Peter Salvatore Peter Volpe Peter Waller Petr Švihlík +Petros Angelatos Phil -Phil Estes +Phil Estes +Phil Sphicas Phil Spitler Philip Alexander Etling Philip Monroe @@ -1570,21 +1702,25 @@ Pierre Dal-Pra Pierre Wacrenier Pierre-Alain RIVIERE Piotr Bogdan -pixelistik +Piotr Karbowski Porjo Poul Kjeldager Sørensen Pradeep Chhetri Pradip Dhara +Pradipta Kr. 
Banerjee Prasanna Gautam Pratik Karki Prayag Verma Priya Wadhwa Projjol Banerji Przemek Hejman +Puneet Pruthi Pure White pysqz Qiang Huang +Qin TianHuan Qinglan Peng +Quan Tian qudongfang Quentin Brossard Quentin Perez @@ -1607,6 +1743,7 @@ Ramon van Alteren RaviTeja Pothana Ray Tsang ReadmeCritic +realityone Recursive Madman Reficul Regan McCooey @@ -1617,9 +1754,9 @@ Renaud Gaubert Rhys Hiltner Ri Xu Ricardo N Feliciano +Rich Horwood Rich Moyse Rich Seymour -Richard Richard Burnison Richard Harvey Richard Mathie @@ -1634,12 +1771,14 @@ Riku Voipio Riley Guerin Ritesh H Shukla Riyaz Faizullabhoy +Rob Cowsill <42620235+rcowsill@users.noreply.github.com> Rob Gulewich Rob Vesse Robert Bachmann Robert Bittle Robert Obryk Robert Schneider +Robert Shade Robert Stern Robert Terhaar Robert Wallis @@ -1652,6 +1791,7 @@ Robin Speekenbrink Robin Thoni robpc Rodolfo Carvalho +Rodrigo Campos Rodrigo Vaz Roel Van Nyen Roger Peppe @@ -1666,11 +1806,14 @@ Roma Sokolov Roman Dudin Roman Mazur Roman Strashkin +Roman Volosatovs +Roman Zabaluev Ron Smits Ron Williams Rong Gao Rong Zhang Rongxiang Song +Rony Weng root root root @@ -1690,13 +1833,16 @@ Russ Magee Ryan Abrams Ryan Anderson Ryan Aslett +Ryan Barry Ryan Belgrave +Ryan Campbell Ryan Detzel Ryan Fowler Ryan Liu Ryan McLaughlin Ryan O'Donnell Ryan Seto +Ryan Shea Ryan Simmen Ryan Stelly Ryan Thomas @@ -1706,9 +1852,9 @@ Ryan Zhang ryancooper7 RyanDeng Ryo Nakao +Ryoga Saito Rémy Greinhofer s. rannou -s00318865 Sabin Basyal Sachin Joshi Sagar Hani @@ -1728,8 +1874,9 @@ Sambuddha Basu Sami Wagiaalla Samuel Andaya Samuel Dion-Girardeau -Samuel Karp +Samuel Karp Samuel PHAN +sanchayanghosh Sandeep Bansal Sankar சங்கர் Sanket Saurav @@ -1745,6 +1892,7 @@ Satoshi Tagomori Scott Bessler Scott Collier Scott Johnston +Scott Percival Scott Stamp Scott Walls sdreyesg @@ -1757,6 +1905,9 @@ Sean P. Kane Sean Rodman Sebastiaan van Steenis Sebastiaan van Stijn +Sebastian Höffner +Sebastian Radloff +Sebastien Goasguen Senthil Kumar Selvaraj Senthil Kumaran SeongJae Park @@ -1776,12 +1927,15 @@ shaunol Shawn Landden Shawn Siefkas shawnhe +Shayan Pooya Shayne Wang Shekhar Gulati Sheng Yang Shengbo Song +Shengjing Zhu Shev Yan Shih-Yuan Lee +Shihao Xia Shijiang Wei Shijun Qin Shishir Mahajan @@ -1790,14 +1944,13 @@ Shourya Sarcar Shu-Wai Chow shuai-z Shukui Yang -Shuwei Hao Sian Lerk Lau +Siarhei Rasiukevich Sidhartha Mani sidharthamani Silas Sewell Silvan Jegen Simão Reis -Simei He Simon Barendse Simon Eskildsen Simon Ferquel @@ -1808,13 +1961,16 @@ Simon Vikstrom Sindhu S Sjoerd Langkemper skanehira +Smark Meng Solganik Alexander Solomon Hykes Song Gao Soshi Katsuta +Sotiris Salloumis Soulou Spencer Brown Spencer Smith +Spike Curtis Sridatta Thatipamala Sridhar Ratnakumar Srini Brahmaroutu @@ -1830,6 +1986,7 @@ Stefan S. Stefan Scherer Stefan Staudenmeyer Stefan Weil +Steffen Butzer Stephan Spindler Stephen Benjamin Stephen Crosby @@ -1848,7 +2005,9 @@ Steven Iveson Steven Merrill Steven Richards Steven Taylor +Stéphane Este-Gracias Stig Larsson +Su Wang Subhajit Ghosh Sujith Haridasan Sun Gengze <690388648@qq.com> @@ -1858,15 +2017,16 @@ Sunny Gogoi Suryakumar Sudar Sven Dowideit Swapnil Daingade -Sylvain Baubeau +Sylvain Baubeau Sylvain Bellemare Sébastien Sébastien HOUZÉ Sébastien Luttringer Sébastien Stormacq +Sören Tempel Tabakhase Tadej Janež -TAGOMORI Satoshi +Takuto Sato tang0th Tangi Colin Tatsuki Sugiura @@ -1877,18 +2037,21 @@ Ted M. 
Young Tehmasp Chaudhri Tejaswini Duggaraju Tejesh Mehta +Terry Chu terryding77 <550147740@qq.com> -tgic Thatcher Peskens theadactyl Thell 'Bo' Fowler Thermionix +Thiago Alves Silva Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Gazagnaire +Thomas Graf Thomas Grainger Thomas Hansen +Thomas Ledos Thomas Leonard Thomas Léveil Thomas Orozco @@ -1899,11 +2062,13 @@ Thomas Swift Thomas Tanaka Thomas Texier Ti Zhou +Tiago Seabra Tianon Gravi Tianyi Wang Tibor Vass Tiffany Jernigan Tiffany Low +Till Claassen Till Wegmüller Tim Tim Bart @@ -1915,11 +2080,14 @@ Tim Potter Tim Ruffles Tim Smith Tim Terhorst +Tim Wagner Tim Wang Tim Waugh Tim Wraight Tim Zju <21651152@zju.edu.cn> +timchenxiaoyu <837829664@qq.com> timfeirg +Timo Rothenpieler Timothy Hobbs tjwebb123 tobe @@ -1928,6 +2096,7 @@ Tobias Bradtke Tobias Gesellchen Tobias Klauser Tobias Munk +Tobias Pfandzelter Tobias Schmidt Tobias Schwab Todd Crane @@ -1941,25 +2110,33 @@ Tom Fotherby Tom Howe Tom Hulihan Tom Maaswinkel +Tom Parker Tom Sweeney Tom Wilkie Tom X. Tobin +Tom Zhao +Tomas Janousek +Tomas Kral Tomas Tomecek Tomasz Kopczynski Tomasz Lipinski Tomasz Nurkiewicz +Tomek Mańko Tommaso Visconti +Tomoya Tabuchi Tomáš Hrčka +tonic Tonny Xu Tony Abboud Tony Daws Tony Miller toogley Torstein Husebø +Toshiaki Makita Tõnis Tiigi Trace Andreason tracylihui <793912329@qq.com> -Trapier Marshall +Trapier Marshall Travis Cline Travis Thieman Trent Ogren @@ -1969,6 +2146,8 @@ Trevor Sullivan Trishna Guha Tristan Carel Troy Denton +Tudor Brindus +Ty Alexander Tycho Andersen Tyler Brock Tyler Brown @@ -1979,6 +2158,7 @@ Umesh Yadav Utz Bacher vagrant Vaidas Jablonskis +Valentin Kulesh vanderliang Velko Ivanov Veres Lajos @@ -1992,12 +2172,13 @@ Victor Palma Victor Vieux Victoria Bialas Vijaya Kumar K +Vikas Choudhary Vikram bir Singh Viktor Stanchev Viktor Vojnovski VinayRaghavanKS Vincent Batts -Vincent Bernat +Vincent Bernat Vincent Boulineau Vincent Demeester Vincent Giersch @@ -2017,9 +2198,9 @@ Vladimir Pouzanov Vladimir Rutsky Vladimir Varankin VladimirAus +Vladislav Kolesnikov Vlastimil Zeman Vojtech Vitek (V-Teq) -waitingkuo Walter Leibbrandt Walter Stanish Wang Chao @@ -2034,6 +2215,7 @@ wanghuaiqing Ward Vandewege WarheadsSE Wassim Dhif +Wataru Ishida Wayne Chang Wayne Song Weerasak Chongnguluam @@ -2048,7 +2230,6 @@ Wendel Fleming Wenjun Tang Wenkai Yin wenlxie -Wentao Zhang Wenxuan Zhao Wenyu You <21551128@zju.edu.cn> Wenzhi Liang @@ -2068,16 +2249,22 @@ William Thurston Wilson Júnior Wing-Kam Wong WiseTrem +Wolfgang Nagele Wolfgang Powisch Wonjun Kim +WuLonghui xamyzhao +Xia Wu Xian Chaobo Xianglin Gao +Xianjie Xianlu Bird Xiao YongBiao +Xiao Zhang XiaoBing Jiang Xiaodong Liu Xiaodong Zhang +Xiaohua Ding Xiaoxi He Xiaoxu Chen Xiaoyu Zhang @@ -2092,12 +2279,16 @@ Xuecong Liao xuzhaokui Yadnyawalkya Tale Yahya +yalpul YAMADA Tsuyoshi Yamasaki Masahide Yan Feng +Yan Zhu Yang Bai +Yang Li Yang Pengfei yangchenliang +Yann Autissier Yanqiang Miao Yao Zaiyong Yash Murty @@ -2117,6 +2308,7 @@ Yosef Fertel You-Sheng Yang (楊有勝) youcai Youcef YEKHLEF +Youfu Zhang Yu Changchun Yu Chengxia Yu Peng @@ -2124,14 +2316,18 @@ Yu-Ju Hong Yuan Sun Yuanhong Peng Yue Zhang +Yufei Xiong Yuhao Fang Yuichiro Kaneko +YujiOshima Yunxiang Huang Yurii Rashkovskii Yusuf Tarık Günaydın +Yves Blusseau <90z7oey02@sneakemail.com> Yves Junqueira Zac Dover Zach Borboa +Zach Gershman Zachary Jaffee Zain Memon Zaiste! 
@@ -2147,6 +2343,7 @@ Zhenan Ye <21551168@zju.edu.cn> zhenghenghuo Zhenhai Gao Zhenkun Bi +ZhiPeng Lu zhipengzuo Zhou Hao Zhoulin Xie @@ -2164,7 +2361,6 @@ Zou Yu zqh Zuhayr Elahi Zunayed Ali -Álex González Álvaro Lázaro Átila Camurça Alves 尹吉峰 @@ -2173,3 +2369,4 @@ Zunayed Ali 慕陶 搏通 黄艳红00139573 +정재영 diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go index 5e6310fdcd..7df039b4cf 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -91,3 +91,12 @@ func GetConfigHome() (string, error) { } return filepath.Join(home, ".config"), nil } + +// GetLibHome returns $HOME/.local/lib +func GetLibHome() (string, error) { + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get HOME") + } + return filepath.Join(home, ".local/lib"), nil +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index fc48e674c1..11f1bec985 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -26,3 +26,8 @@ func GetDataHome() (string, error) { func GetConfigHome() (string, error) { return "", errors.New("homedir.GetConfigHome() is not supported on this system") } + +// GetLibHome is unsupported on non-linux system. +func GetLibHome() (string, error) { + return "", errors.New("homedir.GetLibHome() is not supported on this system") +} diff --git a/vendor/github.com/go-jose/go-jose/v3/jwt/builder.go b/vendor/github.com/go-jose/go-jose/v3/jwt/builder.go new file mode 100644 index 0000000000..7df270cc39 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/jwt/builder.go @@ -0,0 +1,334 @@ +/*- + * Copyright 2016 Zbigniew Mandziejewicz + * Copyright 2016 Square, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "bytes" + "reflect" + + "github.com/go-jose/go-jose/v3/json" + + "github.com/go-jose/go-jose/v3" +) + +// Builder is a utility for making JSON Web Tokens. Calls can be chained, and +// errors are accumulated until the final call to CompactSerialize/FullSerialize. +type Builder interface { + // Claims encodes claims into JWE/JWS form. Multiple calls will merge claims + // into single JSON object. If you are passing private claims, make sure to set + // struct field tags to specify the name for the JSON key to be used when + // serializing. + Claims(i interface{}) Builder + // Token builds a JSONWebToken from provided data. + Token() (*JSONWebToken, error) + // FullSerialize serializes a token using the JWS/JWE JSON Serialization format. + FullSerialize() (string, error) + // CompactSerialize serializes a token using the compact serialization format. + CompactSerialize() (string, error) +} + +// NestedBuilder is a utility for making Signed-Then-Encrypted JSON Web Tokens. 
+// Calls can be chained, and errors are accumulated until final call to +// CompactSerialize/FullSerialize. +type NestedBuilder interface { + // Claims encodes claims into JWE/JWS form. Multiple calls will merge claims + // into single JSON object. If you are passing private claims, make sure to set + // struct field tags to specify the name for the JSON key to be used when + // serializing. + Claims(i interface{}) NestedBuilder + // Token builds a NestedJSONWebToken from provided data. + Token() (*NestedJSONWebToken, error) + // FullSerialize serializes a token using the JSON Serialization format. + FullSerialize() (string, error) + // CompactSerialize serializes a token using the compact serialization format. + CompactSerialize() (string, error) +} + +type builder struct { + payload map[string]interface{} + err error +} + +type signedBuilder struct { + builder + sig jose.Signer +} + +type encryptedBuilder struct { + builder + enc jose.Encrypter +} + +type nestedBuilder struct { + builder + sig jose.Signer + enc jose.Encrypter +} + +// Signed creates builder for signed tokens. +func Signed(sig jose.Signer) Builder { + return &signedBuilder{ + sig: sig, + } +} + +// Encrypted creates builder for encrypted tokens. +func Encrypted(enc jose.Encrypter) Builder { + return &encryptedBuilder{ + enc: enc, + } +} + +// SignedAndEncrypted creates builder for signed-then-encrypted tokens. +// ErrInvalidContentType will be returned if encrypter doesn't have JWT content type. +func SignedAndEncrypted(sig jose.Signer, enc jose.Encrypter) NestedBuilder { + if contentType, _ := enc.Options().ExtraHeaders[jose.HeaderContentType].(jose.ContentType); contentType != "JWT" { + return &nestedBuilder{ + builder: builder{ + err: ErrInvalidContentType, + }, + } + } + return &nestedBuilder{ + sig: sig, + enc: enc, + } +} + +func (b builder) claims(i interface{}) builder { + if b.err != nil { + return b + } + + m, ok := i.(map[string]interface{}) + switch { + case ok: + return b.merge(m) + case reflect.Indirect(reflect.ValueOf(i)).Kind() == reflect.Struct: + m, err := normalize(i) + if err != nil { + return builder{ + err: err, + } + } + return b.merge(m) + default: + return builder{ + err: ErrInvalidClaims, + } + } +} + +func normalize(i interface{}) (map[string]interface{}, error) { + m := make(map[string]interface{}) + + raw, err := json.Marshal(i) + if err != nil { + return nil, err + } + + d := json.NewDecoder(bytes.NewReader(raw)) + d.SetNumberType(json.UnmarshalJSONNumber) + + if err := d.Decode(&m); err != nil { + return nil, err + } + + return m, nil +} + +func (b *builder) merge(m map[string]interface{}) builder { + p := make(map[string]interface{}) + for k, v := range b.payload { + p[k] = v + } + for k, v := range m { + p[k] = v + } + + return builder{ + payload: p, + } +} + +func (b *builder) token(p func(interface{}) ([]byte, error), h []jose.Header) (*JSONWebToken, error) { + return &JSONWebToken{ + payload: p, + Headers: h, + }, nil +} + +func (b *signedBuilder) Claims(i interface{}) Builder { + return &signedBuilder{ + builder: b.builder.claims(i), + sig: b.sig, + } +} + +func (b *signedBuilder) Token() (*JSONWebToken, error) { + sig, err := b.sign() + if err != nil { + return nil, err + } + + h := make([]jose.Header, len(sig.Signatures)) + for i, v := range sig.Signatures { + h[i] = v.Header + } + + return b.builder.token(sig.Verify, h) +} + +func (b *signedBuilder) CompactSerialize() (string, error) { + sig, err := b.sign() + if err != nil { + return "", err + } + + return sig.CompactSerialize() 
+} + +func (b *signedBuilder) FullSerialize() (string, error) { + sig, err := b.sign() + if err != nil { + return "", err + } + + return sig.FullSerialize(), nil +} + +func (b *signedBuilder) sign() (*jose.JSONWebSignature, error) { + if b.err != nil { + return nil, b.err + } + + p, err := json.Marshal(b.payload) + if err != nil { + return nil, err + } + + return b.sig.Sign(p) +} + +func (b *encryptedBuilder) Claims(i interface{}) Builder { + return &encryptedBuilder{ + builder: b.builder.claims(i), + enc: b.enc, + } +} + +func (b *encryptedBuilder) CompactSerialize() (string, error) { + enc, err := b.encrypt() + if err != nil { + return "", err + } + + return enc.CompactSerialize() +} + +func (b *encryptedBuilder) FullSerialize() (string, error) { + enc, err := b.encrypt() + if err != nil { + return "", err + } + + return enc.FullSerialize(), nil +} + +func (b *encryptedBuilder) Token() (*JSONWebToken, error) { + enc, err := b.encrypt() + if err != nil { + return nil, err + } + + return b.builder.token(enc.Decrypt, []jose.Header{enc.Header}) +} + +func (b *encryptedBuilder) encrypt() (*jose.JSONWebEncryption, error) { + if b.err != nil { + return nil, b.err + } + + p, err := json.Marshal(b.payload) + if err != nil { + return nil, err + } + + return b.enc.Encrypt(p) +} + +func (b *nestedBuilder) Claims(i interface{}) NestedBuilder { + return &nestedBuilder{ + builder: b.builder.claims(i), + sig: b.sig, + enc: b.enc, + } +} + +func (b *nestedBuilder) Token() (*NestedJSONWebToken, error) { + enc, err := b.signAndEncrypt() + if err != nil { + return nil, err + } + + return &NestedJSONWebToken{ + enc: enc, + Headers: []jose.Header{enc.Header}, + }, nil +} + +func (b *nestedBuilder) CompactSerialize() (string, error) { + enc, err := b.signAndEncrypt() + if err != nil { + return "", err + } + + return enc.CompactSerialize() +} + +func (b *nestedBuilder) FullSerialize() (string, error) { + enc, err := b.signAndEncrypt() + if err != nil { + return "", err + } + + return enc.FullSerialize(), nil +} + +func (b *nestedBuilder) signAndEncrypt() (*jose.JSONWebEncryption, error) { + if b.err != nil { + return nil, b.err + } + + p, err := json.Marshal(b.payload) + if err != nil { + return nil, err + } + + sig, err := b.sig.Sign(p) + if err != nil { + return nil, err + } + + p2, err := sig.CompactSerialize() + if err != nil { + return nil, err + } + + return b.enc.Encrypt([]byte(p2)) +} diff --git a/vendor/github.com/go-jose/go-jose/v3/jwt/claims.go b/vendor/github.com/go-jose/go-jose/v3/jwt/claims.go new file mode 100644 index 0000000000..286be1d2fe --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/jwt/claims.go @@ -0,0 +1,130 @@ +/*- + * Copyright 2016 Zbigniew Mandziejewicz + * Copyright 2016 Square, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "strconv" + "time" + + "github.com/go-jose/go-jose/v3/json" +) + +// Claims represents public claim values (as specified in RFC 7519). 
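+// A minimal usage sketch of these claim fields, assuming sig is an existing
+// jose.Signer supplied by the caller (Signed, Audience and NewNumericDate are
+// defined in this package):
+//
+//	cl := Claims{
+//		Issuer:   "https://issuer.example.com",
+//		Subject:  "subject",
+//		Audience: Audience{"service"},
+//		IssuedAt: NewNumericDate(time.Now()),
+//		Expiry:   NewNumericDate(time.Now().Add(time.Hour)),
+//	}
+//	raw, err := Signed(sig).Claims(cl).CompactSerialize()
+//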
+type Claims struct { + Issuer string `json:"iss,omitempty"` + Subject string `json:"sub,omitempty"` + Audience Audience `json:"aud,omitempty"` + Expiry *NumericDate `json:"exp,omitempty"` + NotBefore *NumericDate `json:"nbf,omitempty"` + IssuedAt *NumericDate `json:"iat,omitempty"` + ID string `json:"jti,omitempty"` +} + +// NumericDate represents date and time as the number of seconds since the +// epoch, ignoring leap seconds. Non-integer values can be represented +// in the serialized format, but we round to the nearest second. +// See RFC7519 Section 2: https://tools.ietf.org/html/rfc7519#section-2 +type NumericDate int64 + +// NewNumericDate constructs NumericDate from time.Time value. +func NewNumericDate(t time.Time) *NumericDate { + if t.IsZero() { + return nil + } + + // While RFC 7519 technically states that NumericDate values may be + // non-integer values, we don't bother serializing timestamps in + // claims with sub-second accurancy and just round to the nearest + // second instead. Not convined sub-second accuracy is useful here. + out := NumericDate(t.Unix()) + return &out +} + +// MarshalJSON serializes the given NumericDate into its JSON representation. +func (n NumericDate) MarshalJSON() ([]byte, error) { + return []byte(strconv.FormatInt(int64(n), 10)), nil +} + +// UnmarshalJSON reads a date from its JSON representation. +func (n *NumericDate) UnmarshalJSON(b []byte) error { + s := string(b) + + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return ErrUnmarshalNumericDate + } + + *n = NumericDate(f) + return nil +} + +// Time returns time.Time representation of NumericDate. +func (n *NumericDate) Time() time.Time { + if n == nil { + return time.Time{} + } + return time.Unix(int64(*n), 0) +} + +// Audience represents the recipients that the token is intended for. +type Audience []string + +// UnmarshalJSON reads an audience from its JSON representation. +func (s *Audience) UnmarshalJSON(b []byte) error { + var v interface{} + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + switch v := v.(type) { + case string: + *s = []string{v} + case []interface{}: + a := make([]string, len(v)) + for i, e := range v { + s, ok := e.(string) + if !ok { + return ErrUnmarshalAudience + } + a[i] = s + } + *s = a + default: + return ErrUnmarshalAudience + } + + return nil +} + +// MarshalJSON converts audience to json representation. +func (s Audience) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal(s[0]) + } + return json.Marshal([]string(s)) +} + +//Contains checks whether a given string is included in the Audience +func (s Audience) Contains(v string) bool { + for _, a := range s { + if a == v { + return true + } + } + return false +} diff --git a/vendor/github.com/go-jose/go-jose/v3/jwt/doc.go b/vendor/github.com/go-jose/go-jose/v3/jwt/doc.go new file mode 100644 index 0000000000..4cf97b54e7 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/jwt/doc.go @@ -0,0 +1,22 @@ +/*- + * Copyright 2017 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + +Package jwt provides an implementation of the JSON Web Token standard. + +*/ +package jwt diff --git a/vendor/github.com/go-jose/go-jose/v3/jwt/errors.go b/vendor/github.com/go-jose/go-jose/v3/jwt/errors.go new file mode 100644 index 0000000000..27388e5449 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/jwt/errors.go @@ -0,0 +1,53 @@ +/*- + * Copyright 2016 Zbigniew Mandziejewicz + * Copyright 2016 Square, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import "errors" + +// ErrUnmarshalAudience indicates that aud claim could not be unmarshalled. +var ErrUnmarshalAudience = errors.New("go-jose/go-jose/jwt: expected string or array value to unmarshal to Audience") + +// ErrUnmarshalNumericDate indicates that JWT NumericDate could not be unmarshalled. +var ErrUnmarshalNumericDate = errors.New("go-jose/go-jose/jwt: expected number value to unmarshal NumericDate") + +// ErrInvalidClaims indicates that given claims have invalid type. +var ErrInvalidClaims = errors.New("go-jose/go-jose/jwt: expected claims to be value convertible into JSON object") + +// ErrInvalidIssuer indicates invalid iss claim. +var ErrInvalidIssuer = errors.New("go-jose/go-jose/jwt: validation failed, invalid issuer claim (iss)") + +// ErrInvalidSubject indicates invalid sub claim. +var ErrInvalidSubject = errors.New("go-jose/go-jose/jwt: validation failed, invalid subject claim (sub)") + +// ErrInvalidAudience indicated invalid aud claim. +var ErrInvalidAudience = errors.New("go-jose/go-jose/jwt: validation failed, invalid audience claim (aud)") + +// ErrInvalidID indicates invalid jti claim. +var ErrInvalidID = errors.New("go-jose/go-jose/jwt: validation failed, invalid ID claim (jti)") + +// ErrNotValidYet indicates that token is used before time indicated in nbf claim. +var ErrNotValidYet = errors.New("go-jose/go-jose/jwt: validation failed, token not valid yet (nbf)") + +// ErrExpired indicates that token is used after expiry time indicated in exp claim. +var ErrExpired = errors.New("go-jose/go-jose/jwt: validation failed, token is expired (exp)") + +// ErrIssuedInTheFuture indicates that the iat field is in the future. +var ErrIssuedInTheFuture = errors.New("go-jose/go-jose/jwt: validation field, token issued in the future (iat)") + +// ErrInvalidContentType indicates that token requires JWT cty header. +var ErrInvalidContentType = errors.New("go-jose/go-jose/jwt: expected content type to be JWT (cty header)") diff --git a/vendor/github.com/go-jose/go-jose/v3/jwt/jwt.go b/vendor/github.com/go-jose/go-jose/v3/jwt/jwt.go new file mode 100644 index 0000000000..8553fc50b0 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/jwt/jwt.go @@ -0,0 +1,133 @@ +/*- + * Copyright 2016 Zbigniew Mandziejewicz + * Copyright 2016 Square, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "fmt" + "strings" + + jose "github.com/go-jose/go-jose/v3" + "github.com/go-jose/go-jose/v3/json" +) + +// JSONWebToken represents a JSON Web Token (as specified in RFC7519). +type JSONWebToken struct { + payload func(k interface{}) ([]byte, error) + unverifiedPayload func() []byte + Headers []jose.Header +} + +type NestedJSONWebToken struct { + enc *jose.JSONWebEncryption + Headers []jose.Header +} + +// Claims deserializes a JSONWebToken into dest using the provided key. +func (t *JSONWebToken) Claims(key interface{}, dest ...interface{}) error { + b, err := t.payload(key) + if err != nil { + return err + } + + for _, d := range dest { + if err := json.Unmarshal(b, d); err != nil { + return err + } + } + + return nil +} + +// UnsafeClaimsWithoutVerification deserializes the claims of a +// JSONWebToken into the dests. For signed JWTs, the claims are not +// verified. This function won't work for encrypted JWTs. +func (t *JSONWebToken) UnsafeClaimsWithoutVerification(dest ...interface{}) error { + if t.unverifiedPayload == nil { + return fmt.Errorf("go-jose/go-jose: Cannot get unverified claims") + } + claims := t.unverifiedPayload() + for _, d := range dest { + if err := json.Unmarshal(claims, d); err != nil { + return err + } + } + return nil +} + +func (t *NestedJSONWebToken) Decrypt(decryptionKey interface{}) (*JSONWebToken, error) { + b, err := t.enc.Decrypt(decryptionKey) + if err != nil { + return nil, err + } + + sig, err := ParseSigned(string(b)) + if err != nil { + return nil, err + } + + return sig, nil +} + +// ParseSigned parses token from JWS form. +func ParseSigned(s string) (*JSONWebToken, error) { + sig, err := jose.ParseSigned(s) + if err != nil { + return nil, err + } + headers := make([]jose.Header, len(sig.Signatures)) + for i, signature := range sig.Signatures { + headers[i] = signature.Header + } + + return &JSONWebToken{ + payload: sig.Verify, + unverifiedPayload: sig.UnsafePayloadWithoutVerification, + Headers: headers, + }, nil +} + +// ParseEncrypted parses token from JWE form. +func ParseEncrypted(s string) (*JSONWebToken, error) { + enc, err := jose.ParseEncrypted(s) + if err != nil { + return nil, err + } + + return &JSONWebToken{ + payload: enc.Decrypt, + Headers: []jose.Header{enc.Header}, + }, nil +} + +// ParseSignedAndEncrypted parses signed-then-encrypted token from JWE form. 
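+// A rough usage sketch, assuming decKey can decrypt the outer JWE and verKey
+// verifies the nested signature (both keys are caller-provided):
+//
+//	nested, err := ParseSignedAndEncrypted(raw)
+//	if err != nil {
+//		return err
+//	}
+//	tok, err := nested.Decrypt(decKey)
+//	if err != nil {
+//		return err
+//	}
+//	var cl Claims
+//	err = tok.Claims(verKey, &cl)
+//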
+func ParseSignedAndEncrypted(s string) (*NestedJSONWebToken, error) { + enc, err := jose.ParseEncrypted(s) + if err != nil { + return nil, err + } + + contentType, _ := enc.Header.ExtraHeaders[jose.HeaderContentType].(string) + if strings.ToUpper(contentType) != "JWT" { + return nil, ErrInvalidContentType + } + + return &NestedJSONWebToken{ + enc: enc, + Headers: []jose.Header{enc.Header}, + }, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/jwt/validation.go b/vendor/github.com/go-jose/go-jose/v3/jwt/validation.go new file mode 100644 index 0000000000..09d8541f4c --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/jwt/validation.go @@ -0,0 +1,120 @@ +/*- + * Copyright 2016 Zbigniew Mandziejewicz + * Copyright 2016 Square, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import "time" + +const ( + // DefaultLeeway defines the default leeway for matching NotBefore/Expiry claims. + DefaultLeeway = 1.0 * time.Minute +) + +// Expected defines values used for protected claims validation. +// If field has zero value then validation is skipped, with the exception of +// Time, where the zero value means "now." To skip validating them, set the +// corresponding field in the Claims struct to nil. +type Expected struct { + // Issuer matches the "iss" claim exactly. + Issuer string + // Subject matches the "sub" claim exactly. + Subject string + // Audience matches the values in "aud" claim, regardless of their order. + Audience Audience + // ID matches the "jti" claim exactly. + ID string + // Time matches the "exp", "nbf" and "iat" claims with leeway. + Time time.Time +} + +// WithTime copies expectations with new time. +func (e Expected) WithTime(t time.Time) Expected { + e.Time = t + return e +} + +// Validate checks claims in a token against expected values. +// A default leeway value of one minute is used to compare time values. +// +// The default leeway will cause the token to be deemed valid until one +// minute after the expiration time. If you're a server application that +// wants to give an extra minute to client tokens, use this +// function. If you're a client application wondering if the server +// will accept your token, use ValidateWithLeeway with a leeway <=0, +// otherwise this function might make you think a token is valid when +// it is not. +func (c Claims) Validate(e Expected) error { + return c.ValidateWithLeeway(e, DefaultLeeway) +} + +// ValidateWithLeeway checks claims in a token against expected values. A +// custom leeway may be specified for comparing time values. You may pass a +// zero value to check time values with no leeway, but you should note that +// numeric date values are rounded to the nearest second and sub-second +// precision is not supported. +// +// The leeway gives some extra time to the token from the server's +// point of view. That is, if the token is expired, ValidateWithLeeway +// will still accept the token for 'leeway' amount of time. 
This fails +// if you're using this function to check if a server will accept your +// token, because it will think the token is valid even after it +// expires. So if you're a client validating if the token is valid to +// be submitted to a server, use leeway <=0, if you're a server +// validation a token, use leeway >=0. +func (c Claims) ValidateWithLeeway(e Expected, leeway time.Duration) error { + if e.Issuer != "" && e.Issuer != c.Issuer { + return ErrInvalidIssuer + } + + if e.Subject != "" && e.Subject != c.Subject { + return ErrInvalidSubject + } + + if e.ID != "" && e.ID != c.ID { + return ErrInvalidID + } + + if len(e.Audience) != 0 { + for _, v := range e.Audience { + if !c.Audience.Contains(v) { + return ErrInvalidAudience + } + } + } + + // validate using the e.Time, or time.Now if not provided + validationTime := e.Time + if validationTime.IsZero() { + validationTime = time.Now() + } + + if c.NotBefore != nil && validationTime.Add(leeway).Before(c.NotBefore.Time()) { + return ErrNotValidYet + } + + if c.Expiry != nil && validationTime.Add(-leeway).After(c.Expiry.Time()) { + return ErrExpired + } + + // IssuedAt is optional but cannot be in the future. This is not required by the RFC, but + // something is misconfigured if this happens and we should not trust it. + if c.IssuedAt != nil && validationTime.Add(leeway).Before(c.IssuedAt.Time()) { + return ErrIssuedInTheFuture + } + + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go index 08ec0158f6..960c93b5f4 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -38,6 +38,21 @@ type ConfigFile struct { Config Config `json:"config"` OSVersion string `json:"os.version,omitempty"` Variant string `json:"variant,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` +} + +// Platform attempts to generates a Platform from the ConfigFile fields. +func (cf *ConfigFile) Platform() *Platform { + if cf.OS == "" && cf.Architecture == "" && cf.OSVersion == "" && cf.Variant == "" && len(cf.OSFeatures) == 0 { + return nil + } + return &Platform{ + OS: cf.OS, + Architecture: cf.Architecture, + OSVersion: cf.OSVersion, + Variant: cf.Variant, + OSFeatures: cf.OSFeatures, + } } // History is one entry of a list recording how this container image was built. diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go index 8edab24d4a..10665356ea 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go @@ -59,5 +59,6 @@ func (i emptyIndex) ImageIndex(v1.Hash) (v1.ImageIndex, error) { func base() *v1.IndexManifest { return &v1.IndexManifest{ SchemaVersion: 2, + MediaType: types.OCIImageIndex, } } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go index 9ee91ee292..59ca402698 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go @@ -85,6 +85,47 @@ func (p Platform) Equals(o Platform) bool { stringSliceEqualIgnoreOrder(p.Features, o.Features) } +// Satisfies returns true if this Platform "satisfies" the given spec Platform. 
+// +// Note that this is different from Equals and that Satisfies is not reflexive. +// +// The given spec represents "requirements" such that any missing values in the +// spec are not compared. +// +// For OSFeatures and Features, Satisfies will return true if this Platform's +// fields contain a superset of the values in the spec's fields (order ignored). +func (p Platform) Satisfies(spec Platform) bool { + return satisfies(spec.OS, p.OS) && + satisfies(spec.Architecture, p.Architecture) && + satisfies(spec.Variant, p.Variant) && + satisfies(spec.OSVersion, p.OSVersion) && + satisfiesList(spec.OSFeatures, p.OSFeatures) && + satisfiesList(spec.Features, p.Features) +} + +func satisfies(want, have string) bool { + return want == "" || want == have +} + +func satisfiesList(want, have []string) bool { + if len(want) == 0 { + return true + } + + set := map[string]struct{}{} + for _, h := range have { + set[h] = struct{}{} + } + + for _, w := range want { + if _, ok := set[w]; !ok { + return false + } + } + + return true +} + // stringSliceEqual compares 2 string slices and returns if their contents are identical. func stringSliceEqual(a, b []string) bool { if len(a) != len(b) { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go index 7d7a0b3b68..e607df164a 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go @@ -69,7 +69,7 @@ func Write(ref name.Reference, img v1.Image, w io.Writer, opts ...WriteOption) e return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w, opts...) } -// MultiWrite writes the contents of each image to the provided reader, in the compressed format. +// MultiWrite writes the contents of each image to the provided writer, in the compressed format. // The contents are written in the following format: // One manifest.json file at the top level containing information about several images. // One file for each layer, named after the layer's SHA. @@ -82,7 +82,7 @@ func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer, opts ...WriteOpti return MultiRefWrite(refToImage, w, opts...) } -// MultiRefWrite writes the contents of each image to the provided reader, in the compressed format. +// MultiRefWrite writes the contents of each image to the provided writer, in the compressed format. // The contents are written in the following format: // One manifest.json file at the top level containing information about several images. // One file for each layer, named after the layer's SHA. @@ -98,12 +98,13 @@ func MultiRefWrite(refToImage map[name.Reference]v1.Image, w io.Writer, opts ... 
} } - size, mBytes, err := getSizeAndManifest(refToImage) + imageToTags := dedupRefToImage(refToImage) + size, mBytes, err := getSizeAndManifest(imageToTags) if err != nil { return sendUpdateReturn(o, err) } - return writeImagesToTar(refToImage, mBytes, size, w, o) + return writeImagesToTar(imageToTags, mBytes, size, w, o) } // sendUpdateReturn return the passed in error message, also sending on update channel, if it exists @@ -125,11 +126,10 @@ func sendProgressWriterReturn(pw *progressWriter, err error) error { } // writeImagesToTar writes the images to the tarball -func writeImagesToTar(refToImage map[name.Reference]v1.Image, m []byte, size int64, w io.Writer, o *writeOptions) (err error) { +func writeImagesToTar(imageToTags map[v1.Image][]string, m []byte, size int64, w io.Writer, o *writeOptions) (err error) { if w == nil { return sendUpdateReturn(o, errors.New("must pass valid writer")) } - imageToTags := dedupRefToImage(refToImage) tw := w var pw *progressWriter @@ -219,9 +219,7 @@ func writeImagesToTar(refToImage map[name.Reference]v1.Image, m []byte, size int } // calculateManifest calculates the manifest and optionally the size of the tar file -func calculateManifest(refToImage map[name.Reference]v1.Image) (m Manifest, err error) { - imageToTags := dedupRefToImage(refToImage) - +func calculateManifest(imageToTags map[v1.Image][]string) (m Manifest, err error) { if len(imageToTags) == 0 { return nil, errors.New("set of images is empty") } @@ -290,12 +288,13 @@ func calculateManifest(refToImage map[name.Reference]v1.Image) (m Manifest, err // CalculateSize calculates the expected complete size of the output tar file func CalculateSize(refToImage map[name.Reference]v1.Image) (size int64, err error) { - size, _, err = getSizeAndManifest(refToImage) + imageToTags := dedupRefToImage(refToImage) + size, _, err = getSizeAndManifest(imageToTags) return size, err } -func getSizeAndManifest(refToImage map[name.Reference]v1.Image) (int64, []byte, error) { - m, err := calculateManifest(refToImage) +func getSizeAndManifest(imageToTags map[v1.Image][]string) (int64, []byte, error) { + m, err := calculateManifest(imageToTags) if err != nil { return 0, nil, fmt.Errorf("unable to calculate manifest: %w", err) } @@ -304,7 +303,7 @@ func getSizeAndManifest(refToImage map[name.Reference]v1.Image) (int64, []byte, return 0, nil, fmt.Errorf("could not marshall manifest to bytes: %w", err) } - size, err := calculateTarballSize(refToImage, mBytes) + size, err := calculateTarballSize(imageToTags, mBytes) if err != nil { return 0, nil, fmt.Errorf("error calculating tarball size: %w", err) } @@ -312,9 +311,7 @@ func getSizeAndManifest(refToImage map[name.Reference]v1.Image) (int64, []byte, } // calculateTarballSize calculates the size of the tar file -func calculateTarballSize(refToImage map[name.Reference]v1.Image, mBytes []byte) (size int64, err error) { - imageToTags := dedupRefToImage(refToImage) - +func calculateTarballSize(imageToTags map[v1.Image][]string, mBytes []byte) (size int64, err error) { seenLayerDigests := make(map[string]struct{}) for img, name := range imageToTags { manifest, err := img.Manifest() @@ -386,7 +383,8 @@ func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { // ComputeManifest get the manifest.json that will be written to the tarball // for multiple references func ComputeManifest(refToImage map[name.Reference]v1.Image) (Manifest, error) { - return calculateManifest(refToImage) + imageToTags := dedupRefToImage(refToImage) + return 
calculateManifest(imageToTags) } // WriteOption a function option to pass to Write() diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go index 0cb1586f1e..a47b7475ed 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go @@ -99,6 +99,11 @@ func (in *ConfigFile) DeepCopyInto(out *ConfigFile) { } in.RootFS.DeepCopyInto(&out.RootFS) in.Config.DeepCopyInto(&out.Config) + if in.OSFeatures != nil { + in, out := &in.OSFeatures, &out.OSFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -222,6 +227,11 @@ func (in *IndexManifest) DeepCopyInto(out *IndexManifest) { (*out)[key] = val } } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(Descriptor) + (*in).DeepCopyInto(*out) + } return } @@ -253,6 +263,11 @@ func (in *Manifest) DeepCopyInto(out *Manifest) { (*out)[key] = val } } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(Descriptor) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 0af08e65e6..7a008a4d23 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@latest + - go install mvdan.cc/garble@v0.9.3 builds: - diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 3c00c1af96..958666ed89 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -9,7 +9,6 @@ This package provides various compression algorithms. * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. * [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. -* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) @@ -17,6 +16,35 @@ This package provides various compression algorithms. 
# changelog +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + * Sept 16, 2022 (v1.15.10) * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 6f341914c6..dac97e58a2 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error { c1.encodeZero(tt[src[ip-2]]) ip -= 2 } + src = src[:ip] // Main compression loop. 
switch { case !s.zeroBits && s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush. // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case !s.zeroBits: // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) s.bw.flush32() c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } default: - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) s.bw.flush32() c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } } @@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) { for _, v := range in { s.count[v]++ } - m := uint32(0) + m, symlen := uint32(0), s.symbolLen for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + symlen = uint16(i) + 1 } + s.symbolLen = symlen return int(m) } diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index 504a7be9da..e36d9742f9 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() { // 2 bounds checks. 
v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 4d14542fac..cdc94856f2 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -365,29 +365,29 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { m := uint32(0) if len(s.prevTable) > 0 { for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else { - if s.prevTable[i].nBits == 0 { - reuse = false - } - } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false } } return int(m), reuse } for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + s.symbolLen = uint16(i) + 1 } return int(m), false } @@ -484,34 +484,35 @@ func (s *Scratch) buildCTable() error { // Different from reference implementation. huffNode0 := s.nodes[0 : huffNodesLen+1] - for huffNode[nonNullRank].count == 0 { + for huffNode[nonNullRank].count() == 0 { nonNullRank-- } lowS := int16(nonNullRank) nodeRoot := nodeNb + lowS - 1 lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) nodeNb++ lowS -= 2 for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 + huffNode[n].setCount(1 << 30) } // fake entry, strong barrier - huffNode0[0].count = 1 << 31 + huffNode0[0].setCount(1 << 31) // create parents for nodeNb <= nodeRoot { var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n1 = lowS lowS-- } else { n1 = lowN lowN++ } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n2 = lowS lowS-- } else { @@ -519,18 +520,19 @@ func (s *Scratch) buildCTable() error { lowN++ } - huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) nodeNb++ } // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 + huffNode[nodeRoot].setNbBits(0) for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } 
s.actualTableLog = s.setMaxHeight(int(nonNullRank)) maxNbBits := s.actualTableLog @@ -542,7 +544,7 @@ func (s *Scratch) buildCTable() error { var nbPerRank [tableLogMax + 1]uint16 var valPerRank [16]uint16 for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ + nbPerRank[v.nbBits()]++ } // determine stating value per rank { @@ -557,7 +559,7 @@ func (s *Scratch) buildCTable() error { // push nbBits per symbol, symbol order for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits + s.cTable[v.symbol()].nBits = v.nbBits() } // assign value within rank, symbol order @@ -603,12 +605,12 @@ func (s *Scratch) huffSort() { pos := rank[r].current rank[r].current++ prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { + for pos > rank[r].base && c > prev.count() { nodes[pos&huffNodesMask] = prev pos-- prev = nodes[(pos-1)&huffNodesMask] } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) } } @@ -617,7 +619,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { huffNode := s.nodes[1 : huffNodesLen+1] //huffNode = huffNode[: huffNodesLen] - largestBits := huffNode[lastNonNull].nbBits + largestBits := huffNode[lastNonNull].nbBits() // early exit : no elt > maxNbBits if largestBits <= maxNbBits { @@ -627,14 +629,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { baseCost := int(1) << (largestBits - maxNbBits) n := uint32(lastNonNull) - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) n-- } // n stops at huffNode[n].nbBits <= maxNbBits - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } // n end at index of smallest symbol using < maxNbBits @@ -655,10 +657,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { { currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { + if huffNode[pos].nbBits() >= currentNbBits { continue } - currentNbBits = huffNode[pos].nbBits // < maxNbBits + currentNbBits = huffNode[pos].nbBits() // < maxNbBits rankLast[maxNbBits-currentNbBits] = uint32(pos) } } @@ -675,8 +677,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { if lowPos == noSymbol { break } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() if highTotal <= lowTotal { break } @@ -692,13 +694,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // this rank is no longer empty rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] } - huffNode[rankLast[nBitsToDecrease]].nbBits++ + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) if rankLast[nBitsToDecrease] == 0 { /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol } else { rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ } } @@ -706,15 +709,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { for totalCost < 0 { /* Sometimes, cost correction overshoot */ if rankLast[1] == 
noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } - huffNode[n+1].nbBits-- + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) rankLast[1] = n + 1 totalCost++ continue } - huffNode[rankLast[1]+1].nbBits-- + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) rankLast[1]++ totalCost++ } @@ -722,9 +725,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { return maxNbBits } -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 } + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 42a237eac4..3c0b398c72 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { b, err := fse.Decompress(in[:iSize], s.fse) s.fse.Out = nil if err != nil { - return s, nil, err + return s, nil, fmt.Errorf("fse decompress returned: %w", err) } if len(b) > 255 { return s, nil, errors.New("corrupt input: output table too large") diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s index 8d2187a2ce..c4c7ab2d1f 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -4,360 +4,349 @@ // func decompress4x_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), AX MOVBQZX 8(AX), DI - MOVQ 16(AX), SI - MOVQ 48(AX), BX - MOVQ 24(AX), R9 - MOVQ 32(AX), R10 - MOVQ (AX), R11 + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 // Main loop main_loop: - MOVQ SI, R8 - CMPQ R8, BX + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), AX - SUBQ $0x20, R13 + MOVQ 24(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ (R11), R14 + MOVQ (R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 24(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted 
+= (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), AX - SUBQ $0x20, R13 + MOVQ 72(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 48(R11), R14 + MOVQ 48(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 72(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), AX - SUBQ $0x20, R13 + MOVQ 120(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 96(R11), R14 + MOVQ 96(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 120(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 
:= table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), AX - SUBQ $0x20, R13 + MOVQ 168(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 144(R11), R14 + MOVQ 144(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 168(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x02, SI + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX TESTB DL, DL JZ main_loop MOVQ ctx+0(FP), AX - SUBQ 16(AX), SI - SHLQ $0x02, SI - MOVQ SI, 40(AX) + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) RET // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), CX MOVBQZX 8(CX), DI MOVQ 16(CX), BX MOVQ 48(CX), SI - MOVQ 24(CX), R9 - MOVQ 32(CX), R10 - MOVQ (CX), R11 + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 // Main loop main_loop: - MOVQ BX, R8 - CMPQ R8, SI + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ (R11), R15 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 24(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB 
CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -365,88 +354,86 @@ skip_fill0: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 48(R11), R15 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 72(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -454,88 +441,86 @@ skip_fill1: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + 
MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 96(R11), R15 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 120(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -543,88 +528,86 @@ skip_fill2: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 144(R11), R15 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 168(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB 
CL, R12 BSWAPL AX // val2 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -632,11 +615,12 @@ skip_fill3: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) + MOVQ R11, 176(R10) + MOVB R12, 184(R10) ADDQ $0x04, BX TESTB DL, DL JZ main_loop @@ -652,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -667,7 +651,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -744,7 +728,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) @@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -772,7 +756,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -839,7 +823,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 298c4f8e97..05db94d39a 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -103,6 +103,28 @@ func hash(u, shift uint32) uint32 { return (u * 0x1e35a7bd) >> shift } +// EncodeBlockInto exposes encodeBlock but checks dst size. +func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It // assumes that the varint-encoded length of the decompressed bytes has already // been written. 
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index f52d1aed6f..2445bb4fe5 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -82,8 +82,9 @@ type blockDec struct { err error - // Check against this crc - checkCRC []byte + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool // Frame to use for singlethreaded decoding. // Should not be used by the decoder itself since parent may be another frame. @@ -191,16 +192,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.dataStorage) < cSize { + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. if b.lowMem || cSize > maxCompressedBlockSize { b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debugDecoder { @@ -209,6 +208,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return err } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } return nil } @@ -232,7 +234,7 @@ func (b *blockDec) decodeBuf(hist *history) error { if b.lowMem { b.dst = make([]byte, b.RLESize) } else { - b.dst = make([]byte, maxBlockSize) + b.dst = make([]byte, maxCompressedBlockSize) } } b.dst = b.dst[:b.RLESize] diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index 5022e71c83..f6a240970d 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -4,7 +4,6 @@ package zstd import ( - "bytes" "encoding/binary" "errors" "io" @@ -102,8 +101,8 @@ func (h *Header) Decode(in []byte) error { } h.HeaderSize += 4 b, in := in[:4], in[4:] - if !bytes.Equal(b, frameMagic) { - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { return ErrMagicMismatch } if len(in) < 4 { @@ -153,7 +152,7 @@ func (h *Header) Decode(in []byte) error { } b, in = in[:size], in[size:] h.HeaderSize += int(size) - switch size { + switch len(b) { case 1: h.DictionaryID = uint32(b[0]) case 2: @@ -183,7 +182,7 @@ func (h *Header) Decode(in []byte) error { } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) - switch fcsSize { + switch len(b) { case 1: h.FrameContentSize = uint64(b[0]) case 2: diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 78c10755f8..7113e69ee3 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,7 +5,6 @@ package zstd import ( - "bytes" "context" "encoding/binary" "io" @@ -41,8 +40,7 @@ type Decoder struct { frame *frameDec // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict + dicts map[uint32]*dict // streamWg is the waitgroup for all streams streamWg sync.WaitGroup @@ -104,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Transfer option dicts. 
- d.dicts = make(map[uint32]dict, len(d.o.dicts)) + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) for _, dc := range d.o.dicts { d.dicts[dc.id] = dc } @@ -342,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } return dst, err } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(&dict) + if err = d.setDict(frame); err != nil { + return nil, err } if frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -459,7 +450,11 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) } - if !d.o.ignoreChecksum && len(next.b) > 0 { + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { n, err := d.current.crc.Write(next.b) if err == nil { if n != len(next.b) { @@ -467,18 +462,16 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { } } } - if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { - got := d.current.crc.Sum64() - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(got)) - if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { if debugDecoder { - println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) } d.current.err = ErrCRCMismatch } else { if debugDecoder { - println("CRC ok", tmp[:]) + printf("CRC ok %08x\n", got) } } } @@ -494,18 +487,12 @@ func (d *Decoder) nextBlockSync() (ok bool) { if !d.syncStream.inFrame { d.frame.history.reset() d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } if d.current.err != nil { return false } - if d.frame.DictionaryID != nil { - dict, ok := d.dicts[*d.frame.DictionaryID] - if !ok { - d.current.err = ErrUnknownDictionary - return false - } else { - d.frame.history.setDict(&dict) - } - } if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { d.current.err = ErrDecoderSizeExceeded return false @@ -770,7 +757,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch if block.lowMem { block.dst = make([]byte, block.RLESize) } else { - block.dst = make([]byte, maxBlockSize) + block.dst = make([]byte, maxCompressedBlockSize) } } block.dst = block.dst[:block.RLESize] @@ -864,13 +851,8 @@ decodeStream: if debugDecoder && err != nil { println("Frame decoder returned", err) } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } + if err == nil { + err = d.setDict(frame) } if err == nil && d.frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -918,18 +900,22 @@ decodeStream: println("next block returned error:", err) } dec.err = err - dec.checkCRC = nil + dec.hasCRC = false if dec.Last && frame.HasCheckSum && err == nil { crc, err := frame.rawInput.readSmall(4) - if err != nil { + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } println("CRC missing?", err) dec.err = err - } - var tmp [4]byte - copy(tmp[:], crc) - dec.checkCRC = tmp[:] - if debugDecoder { - println("found crc to check:", dec.checkCRC) + } else { + 
dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } } } err = dec.err @@ -948,3 +934,20 @@ decodeStream: hist.reset() d.frame.history.b = frameHistCache } + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index f42448e69c..07a90dd7af 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,6 +6,8 @@ package zstd import ( "errors" + "fmt" + "math/bits" "runtime" ) @@ -18,7 +20,7 @@ type decoderOptions struct { concurrent int maxDecodedSize uint64 maxWindowSize uint64 - dicts []dict + dicts []*dict ignoreChecksum bool limitToCap bool decodeBufsBelow int @@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption { } // WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithDecoderDicts(dicts ...[]byte) DOption { return func(o *decoderOptions) error { for _, b := range dicts { @@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption { if err != nil { return err } - o.dicts = append(o.dicts, *d) + o.dicts = append(o.dicts, d) } return nil } } +// WithEncoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + // WithDecoderMaxWindow allows to set a maximum window size for decodes. // This allows rejecting packets that will cause big memory usage. // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index a36ae83ef5..ca0951452e 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -1,7 +1,6 @@ package zstd import ( - "bytes" "encoding/binary" "errors" "fmt" @@ -20,7 +19,10 @@ type dict struct { content []byte } -var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. 
+const dictMaxLength = 1 << 31 // ID returns the dictionary id or 0 if d is nil. func (d *dict) ID() uint32 { @@ -30,14 +32,38 @@ func (d *dict) ID() uint32 { return d.id } -// DictContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) DictContentSize() int { +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { if d == nil { return 0 } return len(d.content) } +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + // Load a dictionary as described in // https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format func loadDict(b []byte) (*dict, error) { @@ -50,7 +76,7 @@ func loadDict(b []byte) (*dict, error) { ofDec: sequenceDec{fse: &fseDecoder{}}, mlDec: sequenceDec{fse: &fseDecoder{}}, } - if !bytes.Equal(b[:4], dictMagic[:]) { + if string(b[:4]) != dictMagic { return nil, ErrMagicMismatch } d.id = binary.LittleEndian.Uint32(b[4:8]) @@ -62,7 +88,7 @@ func loadDict(b []byte) (*dict, error) { var err error d.litEnc, b, err = huff0.ReadTable(b[8:], nil) if err != nil { - return nil, err + return nil, fmt.Errorf("loading literal table: %w", err) } d.litEnc.Reuse = huff0.ReusePolicyMust @@ -120,3 +146,16 @@ func loadDict(b []byte) (*dict, error) { return &d, nil } + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 15ae8ee807..e008b99298 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -16,6 +16,7 @@ type fastBase struct { cur int32 // maximum offset. Should be at least 2x block size. maxMatchOff int32 + bufferReset int32 hist []byte crc *xxhash.Digest tmp [8]byte @@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc { } func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) } // check if we have space already if len(e.hist)+len(src) > cap(e.hist) { @@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) } } - a := src[s:] - b := src[t:] - b = b[:len(a)] - end := int32((len(a) >> 3) << 3) - for i := int32(0); i < end; i += 8 { - if diff := load6432(a, i) ^ load6432(b, i); diff != 0 { - return i + int32(bits.TrailingZeros64(diff)>>3) - } - } - - a = a[end:] - b = b[end:] - for i := range a { - if a[i] != b[i] { - return int32(i) + end - } - } - return int32(len(a)) + end + return int32(matchLen(src[s:], src[t:])) } // Reset the encoding table. 
@@ -165,13 +149,13 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { if singleBlock { e.lowMem = true } - e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) e.lowMem = low } // We offset current position so everything will be out of reach. // If above reset line, history will be purged. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += e.maxMatchOff + int32(len(e.hist)) } e.hist = e.hist[:0] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index dbbb88d92b..830f5ba74a 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -85,14 +85,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = prevEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} e.cur = e.maxMatchOff break } @@ -193,8 +189,8 @@ encodeLoop: panic("offset0 was 0") } - bestOf := func(a, b match) match { - if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { + bestOf := func(a, b *match) *match { + if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 { return a } return b @@ -220,22 +216,26 @@ encodeLoop: return m } - best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1) + best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4)) if canRepeat && best.length < goodEnough { cv32 := uint32(cv >> 8) spp := s + 1 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) if best.length > 0 { cv32 = uint32(cv >> 24) spp += 2 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) } } // Load next and check... 
@@ -262,26 +262,33 @@ encodeLoop: candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] // Short at s+1 - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) // Long at s+1, s+2 - best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) - best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1) + m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1) + best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5)) if false { // Short at s+3. // Too often worse... - best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) + m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) + best = bestOf(best, &m) } // See if we can find a better match by checking where the current best ends. // Use that offset to see if we can find a better full match. if sAt := best.s + best.length; sAt < sLimit { nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) candidateEnd := e.longTable[nextHashL] - if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { - bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) - if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { - bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + const skipBeginning = 2 + if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd := bestOf(best, &m) + if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd = bestOf(bestEnd, &m) } best = bestEnd } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index d70e3fd3d3..8582f31a7c 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} e.cur = e.maxMatchOff break } @@ -587,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. 
- for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 1f4a9a2455..7d425109ad 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} e.cur = e.maxMatchOff break } @@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - if e.cur >= bufferReset { + if e.cur >= e.bufferReset { for i := range e.table[:] { e.table[i] = tableEntry{} } @@ -685,7 +681,7 @@ encodeLoop: } // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += int32(len(src)) } } @@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 181edc02b6..315b1a8f2f 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -304,13 +304,13 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { minNonLiteralBlockSize = 1 + 1 + inputMargin ) if debugEncoder { - if len(src) > maxBlockSize { + if len(src) > maxCompressedBlockSize { panic("src too big") } } // Protect against e.cur wraparound. - if e.cur >= bufferReset { + if e.cur >= e.bufferReset { for i := range e.table[:] { e.table[i] = tableEntry{} } @@ -538,7 +538,7 @@ encodeLoop: println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += int32(len(src)) } } @@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { return } // Protect against e.cur wraparound. 
- for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } + e.table = [tableSize]tableEntry{} e.cur = e.maxMatchOff break } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 7aaaedb23e..65c6c36dc1 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -8,6 +8,7 @@ import ( "crypto/rand" "fmt" "io" + "math" rdebug "runtime/debug" "sync" @@ -639,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. + // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. + maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index a7c5e1aac4..8e15be2f7f 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -3,6 +3,8 @@ package zstd import ( "errors" "fmt" + "math" + "math/bits" "runtime" "strings" ) @@ -47,22 +49,22 @@ func (o encoderOptions) encoder() encoder { switch o.level { case SpeedFastest: if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} case SpeedDefault: if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} case SpeedBetterCompression: if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: 
fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} } panic("unknown compression level") } @@ -304,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption { } // WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// // The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithEncoderDict(dict []byte) EOption { return func(o *encoderOptions) error { d, err := loadDict(dict) @@ -315,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption { return nil } } + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. +func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index b6c5054176..d8e8a05bd7 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -5,7 +5,7 @@ package zstd import ( - "bytes" + "encoding/binary" "encoding/hex" "errors" "io" @@ -29,7 +29,7 @@ type frameDec struct { FrameContentSize uint64 - DictionaryID *uint32 + DictionaryID uint32 HasCheckSum bool SingleSegment bool } @@ -43,9 +43,9 @@ const ( MaxWindowSize = 1 << 29 ) -var ( - frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} - skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" ) func newFrameDec(o decoderOptions) *frameDec { @@ -89,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error { copy(signature[1:], b) } - if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) } // Break if not skippable frame. 
break @@ -114,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error { return err } } - if !bytes.Equal(signature[:], frameMagic) { + if string(signature[:]) != frameMagic { if debugDecoder { - println("Got magic numbers: ", signature, "want:", frameMagic) + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) } return ErrMagicMismatch } @@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error { // Read Dictionary_ID // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil + d.DictionaryID = 0 if size := fhd & 3; size != 0 { if size == 3 { size = 4 @@ -167,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error { return err } var id uint32 - switch size { + switch len(b) { case 1: id = uint32(b[0]) case 2: @@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error { if debugDecoder { println("Dict size", size, "ID:", id) } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } + d.DictionaryID = id } // Read Frame_Content_Size @@ -204,7 +200,7 @@ func (d *frameDec) reset(br byteBuffer) error { println("Reading Frame content", err) return err } - switch fcsSize { + switch len(b) { case 1: d.FrameContentSize = uint64(b[0]) case 2: @@ -261,11 +257,16 @@ func (d *frameDec) reset(br byteBuffer) error { } d.history.windowSize = int(d.WindowSize) if !d.o.lowMem || d.history.windowSize < maxBlockSize { - // Alloc 2x window size if not low-mem, or very small window size. + // Alloc 2x window size if not low-mem, or window size below 2MB. d.history.allocFrameBuffer = d.history.windowSize * 2 } else { - // Alloc with one additional block - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } } if debugDecoder { @@ -300,7 +301,7 @@ func (d *frameDec) checkCRC() error { } // We can overwrite upper tmp now - want, err := d.rawInput.readSmall(4) + buf, err := d.rawInput.readSmall(4) if err != nil { println("CRC missing?", err) return err @@ -310,22 +311,17 @@ func (d *frameDec) checkCRC() error { return nil } - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) - if !bytes.Equal(tmp[:], want) { + if got != want { if debugDecoder { - println("CRC Check Failed:", tmp[:], "!=", want) + printf("CRC check failed: got %08x, want %08x\n", got, want) } return ErrCRCMismatch } if debugDecoder { - println("CRC ok", tmp[:]) + printf("CRC ok %08x\n", got) } return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md index 69aa3bb587..777290d44c 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -2,12 +2,7 @@ VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. 
- -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -28,31 +23,49 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. ## Benchmarks Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package - [InfluxDB](https://github.com/influxdata/influxdb) - [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go index 2c112a0ab1..fc40c82001 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -18,19 +18,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. 
Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -52,10 +44,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. - copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s index cea1785619..ddb63aa91b 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -1,3 +1,4 @@ +//go:build !appengine && gc && !purego && !noasm // +build !appengine // +build gc // +build !purego @@ -5,212 +6,205 @@ #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. 
-#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. 
- MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 4d64a17d69..17901e0804 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -1,13 +1,17 @@ -// +build gc,!purego,!noasm +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm #include "textflag.h" -// Register allocation. +// Registers: #define digest R1 -#define h R2 // Return value. -#define p R3 // Input pointer. 
-#define len R4 -#define nblocks R5 // len / 32. +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 #define prime1 R7 #define prime2 R8 #define prime3 R9 @@ -25,60 +29,52 @@ #define round(acc, x) \ MADD prime2, acc, x, acc \ ROR $64-31, acc \ - MUL prime1, acc \ + MUL prime1, acc -// x = round(0, x). +// round0 performs the operation x = round(0, x). #define round0(x) \ MUL prime2, x \ ROR $64-31, x \ - MUL prime1, x \ - -#define mergeRound(x) \ - round0(x) \ - EOR x, h \ - MADD h, prime4, prime1, h \ - -// Update v[1-4] with 32-byte blocks. Assumes len >= 32. -#define blocksLoop() \ - LSR $5, len, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 32(p), (x1, x2) \ - round(v1, x1) \ - LDP -16(p), (x3, x4) \ - round(v2, x2) \ - SUB $1, nblocks \ - round(v3, x3) \ - round(v4, x4) \ - CBNZ nblocks, loop \ - -// The primes are repeated here to ensure that they're stored -// in a contiguous array, so we can load them with LDP. -DATA primes<> +0(SB)/8, $11400714785074694791 -DATA primes<> +8(SB)/8, $14029467366897019727 -DATA primes<>+16(SB)/8, $1609587929392839161 -DATA primes<>+24(SB)/8, $9650029242287828579 -DATA primes<>+32(SB)/8, $2870177450012600261 -GLOBL primes<>(SB), NOPTR+RODATA, $40 + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 - LDP b_base+0(FP), (p, len) +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) - LDP primes<> +0(SB), (prime1, prime2) - LDP primes<>+16(SB), (prime3, prime4) - MOVD primes<>+32(SB), prime5 + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 - CMP $32, len - CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } - BLO afterLoop + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop ADD prime1, prime2, v1 MOVD prime2, v2 MOVD $0, v3 NEG prime1, v4 - blocksLoop() + blockLoop() ROR $64-1, v1, x1 ROR $64-7, v2, x2 @@ -88,71 +84,75 @@ TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 ADD x3, x4 ADD x2, x4, h - mergeRound(v1) - mergeRound(v2) - mergeRound(v3) - mergeRound(v4) + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) afterLoop: - ADD len, h + ADD n, h - TBZ $4, len, try8 + TBZ $4, n, try8 LDP.P 16(p), (x1, x2) round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
ROR $64-27, h EOR x1 @> 64-27, h, h MADD h, prime4, prime1, h round0(x2) ROR $64-27, h - EOR x2 @> 64-27, h + EOR x2 @> 64-27, h, h MADD h, prime4, prime1, h try8: - TBZ $3, len, try4 + TBZ $3, n, try4 MOVD.P 8(p), x1 round0(x1) ROR $64-27, h - EOR x1 @> 64-27, h + EOR x1 @> 64-27, h, h MADD h, prime4, prime1, h try4: - TBZ $2, len, try2 + TBZ $2, n, try2 MOVWU.P 4(p), x2 MUL prime1, x2 ROR $64-23, h - EOR x2 @> 64-23, h + EOR x2 @> 64-23, h, h MADD h, prime3, prime2, h try2: - TBZ $1, len, try1 + TBZ $1, n, try1 MOVHU.P 2(p), x3 AND $255, x3, x1 LSR $8, x3, x2 MUL prime5, x1 ROR $64-11, h - EOR x1 @> 64-11, h + EOR x1 @> 64-11, h, h MUL prime1, h MUL prime5, x2 ROR $64-11, h - EOR x2 @> 64-11, h + EOR x2 @> 64-11, h, h MUL prime1, h try1: - TBZ $0, len, end + TBZ $0, n, finalize MOVBU (p), x4 MUL prime5, x4 ROR $64-11, h - EOR x4 @> 64-11, h + EOR x4 @> 64-11, h, h MUL prime1, h -end: +finalize: EOR h >> 33, h MUL prime2, h EOR h >> 29, h @@ -163,24 +163,22 @@ end: RET // func writeBlocks(d *Digest, b []byte) int -// -// Assumes len(b) >= 32. -TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40 - LDP primes<>(SB), (prime1, prime2) +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. MOVD d+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) - LDP b_base+8(FP), (p, len) + LDP b_base+8(FP), (p, n) - blocksLoop() + blockLoop() // Store updated state. STP (v1, v2), 0(digest) STP (v3, v4), 16(digest) - BIC $31, len - MOVD len, ret+32(FP) + BIC $31, n + MOVD n, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go index 1a1fac9c26..d4221edf4f 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -13,4 +13,4 @@ package xxhash func Sum64(b []byte) uint64 //go:noescape -func writeBlocks(d *Digest, b []byte) int +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 209cb4a999..0be16cefc7 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -15,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -37,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 52e5703c26..b94993a072 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ 
b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -320,10 +320,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 @@ -617,10 +613,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 @@ -897,10 +889,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 @@ -1152,10 +1140,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool // Requires: SSE TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 @@ -1389,8 +1373,7 @@ loop_finished: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1402,8 +1385,7 @@ error_match_off_too_big: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1747,8 +1729,7 @@ loop_finished: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1760,8 +1741,7 @@ error_match_off_too_big: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 3eb3f1c826..5ffa82f5ac 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -36,9 +36,6 @@ const forcePreDef = false // zstdMinMatch is the minimum zstd match length. const zstdMinMatch = 3 -// Reset the buffer offset when reaching this. -const bufferReset = math.MaxInt32 - MaxWindowSize - // fcsUnknown is used for unknown frame content size. const fcsUnknown = math.MaxUint64 @@ -75,7 +72,6 @@ var ( ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. ErrUnknownDictionary = errors.New("unknown dictionary") // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. @@ -110,26 +106,25 @@ func printf(format string, a ...interface{}) { } } -// matchLen returns the maximum length. +// matchLen returns the maximum common prefix length of a and b. // a must be the shortest of the two. -// The function also returns whether all bytes matched. 
-func matchLen(a, b []byte) int { - b = b[:len(a)] - for i := 0; i < len(a)-7; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + (bits.TrailingZeros64(diff) >> 3) +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 } + n += 8 } - checked := (len(a) >> 3) << 3 - a = a[checked:] - b = b[checked:] for i := range a { if a[i] != b[i] { - return i + checked + break } + n++ } - return len(a) + checked + return n + } func load3232(b []byte, i int32) uint32 { @@ -140,10 +135,6 @@ func load6432(b []byte, i int32) uint64 { return binary.LittleEndian.Uint64(b[i:]) } -func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - type byter interface { Bytes() []byte Len() int diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go index fb1d5918b2..abc860a491 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go @@ -6,8 +6,8 @@ import ( "errors" "fmt" "reflect" - "regexp" "sort" + "strings" ) /* @@ -18,8 +18,12 @@ escaping backslashes ("\") and double quotes (") and wrapping the resulting string in double quotes ("). */ func encodeCanonicalString(s string) string { - re := regexp.MustCompile(`([\"\\])`) - return fmt.Sprintf("\"%s\"", re.ReplaceAllString(s, "\\$1")) + // Escape backslashes + s = strings.ReplaceAll(s, "\\", "\\\\") + // Escape double quotes + s = strings.ReplaceAll(s, "\"", "\\\"") + // Wrap with double quotes + return fmt.Sprintf("\"%s\"", s) } /* @@ -28,16 +32,7 @@ object according to the OLPC canonical JSON specification (see http://wiki.laptop.org/go/Canonical_JSON) and write it to the passed *bytes.Buffer. If canonicalization fails it returns an error. */ -func encodeCanonical(obj interface{}, result *bytes.Buffer) (err error) { - // Since this function is called recursively, we use panic if an error occurs - // and recover in a deferred function, which is always called before - // returning. There we set the error that is returned eventually. - defer func() { - if r := recover(); r != nil { - err = errors.New(r.(string)) - } - }() - +func encodeCanonical(obj interface{}, result *strings.Builder) (err error) { switch objAsserted := obj.(type) { case string: result.WriteString(encodeCanonicalString(objAsserted)) @@ -90,10 +85,9 @@ func encodeCanonical(obj interface{}, result *bytes.Buffer) (err error) { // Canonicalize map for i, key := range mapKeys { - // Note: `key` must be a `string` (see `case map[string]interface{}`) and - // canonicalization of strings cannot err out (see `case string`), thus - // no error handling is needed here. - encodeCanonical(key, result) + if err := encodeCanonical(key, result); err != nil { + return err + } result.WriteString(":") if err := encodeCanonical(objAsserted[key], result); err != nil { @@ -120,7 +114,16 @@ slice. It uses the OLPC canonical JSON specification (see http://wiki.laptop.org/go/Canonical_JSON). If canonicalization fails the byte slice is nil and the second return value contains the error. 
*/ -func EncodeCanonical(obj interface{}) ([]byte, error) { +func EncodeCanonical(obj interface{}) (out []byte, err error) { + // We use panic if an error occurs and recover in a deferred function, + // which is always called before returning. + // There we set the error that is returned eventually. + defer func() { + if r := recover(); r != nil { + err = errors.New(r.(string)) + } + }() + // FIXME: Terrible hack to turn the passed struct into a map, converting // the struct's variable names to the json key names defined in the struct data, err := json.Marshal(obj) @@ -136,10 +139,13 @@ func EncodeCanonical(obj interface{}) ([]byte, error) { } // Create a buffer and write the canonicalized JSON bytes to it - var result bytes.Buffer + var result strings.Builder + // Allocate output result buffer with the input size. + result.Grow(len(data)) + // Recursively encode the jsonmap if err := encodeCanonical(jsonMap, &result); err != nil { return nil, err } - return result.Bytes(), nil + return []byte(result.String()), nil } diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go index 3dc05a4294..139128d43c 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go @@ -5,6 +5,7 @@ https://github.com/secure-systems-lab/dsse package dsse import ( + "context" "encoding/base64" "errors" "fmt" @@ -77,7 +78,7 @@ using the current algorithm, and the key used (if applicable). For an example see EcdsaSigner in sign_test.go. */ type Signer interface { - Sign(data []byte) ([]byte, error) + Sign(ctx context.Context, data []byte) ([]byte, error) KeyID() (string, error) } @@ -143,7 +144,7 @@ Returned is an envelope as defined here: https://github.com/secure-systems-lab/dsse/blob/master/envelope.md One signature will be added for each Signer in the EnvelopeSigner. */ -func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelope, error) { +func (es *EnvelopeSigner) SignPayload(ctx context.Context, payloadType string, body []byte) (*Envelope, error) { var e = Envelope{ Payload: base64.StdEncoding.EncodeToString(body), PayloadType: payloadType, @@ -152,7 +153,7 @@ func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelop paeEnc := PAE(payloadType, body) for _, signer := range es.providers { - sig, err := signer.Sign(paeEnc) + sig, err := signer.Sign(ctx, paeEnc) if err != nil { return nil, err } @@ -176,8 +177,8 @@ Any domain specific validation such as parsing the decoded body and validating the payload type is left out to the caller. Verify returns a list of accepted keys each including a keyid, public and signiture of the accepted provider keys. */ -func (es *EnvelopeSigner) Verify(e *Envelope) ([]AcceptedKey, error) { - return es.ev.Verify(e) +func (es *EnvelopeSigner) Verify(ctx context.Context, e *Envelope) ([]AcceptedKey, error) { + return es.ev.Verify(ctx, e) } /* diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go index ead1c32ca8..763344eda4 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go @@ -1,6 +1,7 @@ package dsse import ( + "context" "crypto" "errors" "fmt" @@ -15,7 +16,7 @@ must perform the same steps. 
If KeyID returns successfully, only signature matching the key ID will be verified. */ type Verifier interface { - Verify(data, sig []byte) error + Verify(ctx context.Context, data, sig []byte) error KeyID() (string, error) Public() crypto.PublicKey } @@ -31,7 +32,7 @@ type AcceptedKey struct { Sig Signature } -func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) { +func (ev *EnvelopeVerifier) Verify(ctx context.Context, e *Envelope) ([]AcceptedKey, error) { if e == nil { return nil, errors.New("cannot verify a nil envelope") } @@ -78,7 +79,7 @@ func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) { continue } - err = v.Verify(paeEnc, sig) + err = v.Verify(ctx, paeEnc, sig) if err != nil { continue } @@ -104,11 +105,11 @@ func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) { // Sanity if with some reflect magic this happens. if ev.threshold <= 0 || ev.threshold > len(ev.providers) { - return nil, errors.New("Invalid threshold") + return nil, errors.New("invalid threshold") } if len(usedKeyids) < ev.threshold { - return acceptedKeys, errors.New(fmt.Sprintf("Accepted signatures do not match threshold, Found: %d, Expected %d", len(acceptedKeys), ev.threshold)) + return acceptedKeys, fmt.Errorf("accepted signatures do not match threshold, Found: %d, Expected %d", len(acceptedKeys), ev.threshold) } return acceptedKeys, nil @@ -121,7 +122,7 @@ func NewEnvelopeVerifier(v ...Verifier) (*EnvelopeVerifier, error) { func NewMultiEnvelopeVerifier(threshold int, p ...Verifier) (*EnvelopeVerifier, error) { if threshold <= 0 || threshold > len(p) { - return nil, errors.New("Invalid threshold") + return nil, errors.New("invalid threshold") } ev := EnvelopeVerifier{ diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go index c5251c3809..38df9700e1 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go @@ -21,9 +21,9 @@ import ( "errors" "github.com/coreos/go-oidc/v3/oidc" + "github.com/go-jose/go-jose/v3" soauth "github.com/sigstore/sigstore/pkg/oauth" "golang.org/x/oauth2" - "gopkg.in/square/go-jose.v2" ) const ( diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go index 368276ced5..5f801095e3 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go @@ -18,10 +18,12 @@ package dsse import ( "bytes" + "context" "crypto" "errors" "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" ) // SignerAdapter wraps a `sigstore/signature.Signer`, making it compatible with `go-securesystemslib/dsse.Signer`. @@ -33,12 +35,12 @@ type SignerAdapter struct { } // Sign implements `go-securesystemslib/dsse.Signer` -func (a *SignerAdapter) Sign(data []byte) ([]byte, error) { - return a.SignatureSigner.SignMessage(bytes.NewReader(data), a.Opts...) +func (a *SignerAdapter) Sign(ctx context.Context, data []byte) ([]byte, error) { + return a.SignatureSigner.SignMessage(bytes.NewReader(data), append(a.Opts, options.WithContext(ctx))...) 
} // Verify disabled `go-securesystemslib/dsse.Verifier` -func (a *SignerAdapter) Verify(data, sig []byte) error { +func (a *SignerAdapter) Verify(_ context.Context, _, _ []byte) error { return errors.New("Verify disabled") } @@ -60,8 +62,8 @@ type VerifierAdapter struct { } // Verify implements `go-securesystemslib/dsse.Verifier` -func (a *VerifierAdapter) Verify(data, sig []byte) error { - return a.SignatureVerifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data)) +func (a *VerifierAdapter) Verify(ctx context.Context, data, sig []byte) error { + return a.SignatureVerifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithContext(ctx)) } // Public implements `go-securesystemslib/dsse.Verifier` diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go index f3a22da4e8..0ba565405a 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go @@ -17,6 +17,7 @@ package dsse import ( "bytes" + "context" "crypto" "encoding/base64" "encoding/json" @@ -109,7 +110,8 @@ func (w *wrappedVerifier) VerifySignature(s, _ io.Reader, opts ...signature.Veri if err != nil { return err } - _, err = verifier.Verify(&env) + + _, err = verifier.Verify(context.Background(), &env) return err } diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go index e48312edce..cdf0d494e9 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go @@ -16,6 +16,7 @@ package dsse import ( + "context" "crypto" "encoding/json" "errors" @@ -77,7 +78,7 @@ func (wL *wrappedMultiSigner) SignMessage(r io.Reader, opts ...signature.SignOpt return nil, err } - env, err := envSigner.SignPayload(wL.payloadType, p) + env, err := envSigner.SignPayload(context.Background(), wL.payloadType, p) if err != nil { return nil, err } @@ -143,7 +144,7 @@ func (wL *wrappedMultiVerifier) VerifySignature(s, _ io.Reader, opts ...signatur return err } - _, err = envVerifier.Verify(&env) + _, err = envVerifier.Verify(context.Background(), &env) return err } diff --git a/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go b/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go index 4c6c2a03b7..e75d609e89 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go +++ b/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go @@ -43,7 +43,7 @@ import ( const ( // DefaultRemoteRoot is the default remote TUF root location. - DefaultRemoteRoot = "https://sigstore-tuf-root.storage.googleapis.com" + DefaultRemoteRoot = "https://tuf-repo-cdn.sigstore.dev" // TufRootEnv is the name of the environment variable that locates an alternate local TUF root location. 
TufRootEnv = "TUF_ROOT" diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go index 3d6382f488..80d83ea243 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go @@ -7,10 +7,10 @@ import ( "io/ioutil" "sync" + "github.com/go-jose/go-jose/v3" "github.com/spiffe/go-spiffe/v2/internal/jwtutil" "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/zeebo/errs" - "gopkg.in/square/go-jose.v2" ) var ( diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go index 9d2a8d8d82..77b6a5a05a 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go @@ -9,13 +9,13 @@ import ( "sync" "time" + "github.com/go-jose/go-jose/v3" "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" "github.com/spiffe/go-spiffe/v2/internal/jwtutil" "github.com/spiffe/go-spiffe/v2/internal/x509util" "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/zeebo/errs" - "gopkg.in/square/go-jose.v2" ) const ( diff --git a/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.pb.go b/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.pb.go index b4496ebd6b..d5fd87acfe 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.pb.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.pb.go @@ -149,6 +149,11 @@ type X509SVID struct { X509SvidKey []byte `protobuf:"bytes,3,opt,name=x509_svid_key,json=x509SvidKey,proto3" json:"x509_svid_key,omitempty"` // Required. ASN.1 DER encoded X.509 bundle for the trust domain. Bundle []byte `protobuf:"bytes,4,opt,name=bundle,proto3" json:"bundle,omitempty"` + // Optional. An operator-specified string used to provide guidance on how this + // identity should be used by a workload when more than one SVID is returned. + // For example, `internal` and `external` to indicate an SVID for internal or + // external use, respectively. + Hint string `protobuf:"bytes,5,opt,name=hint,proto3" json:"hint,omitempty"` } func (x *X509SVID) Reset() { @@ -211,6 +216,13 @@ func (x *X509SVID) GetBundle() []byte { return nil } +func (x *X509SVID) GetHint() string { + if x != nil { + return x.Hint + } + return "" +} + // The X509BundlesRequest message conveys parameters for requesting X.509 // bundles. There are currently no such parameters. type X509BundlesRequest struct { @@ -429,6 +441,11 @@ type JWTSVID struct { SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3" json:"spiffe_id,omitempty"` // Required. Encoded JWT using JWS Compact Serialization. Svid string `protobuf:"bytes,2,opt,name=svid,proto3" json:"svid,omitempty"` + // Optional. An operator-specified string used to provide guidance on how this + // identity should be used by a workload when more than one SVID is returned. + // For example, `internal` and `external` to indicate an SVID for internal or + // external use, respectively. 
+ Hint string `protobuf:"bytes,3,opt,name=hint,proto3" json:"hint,omitempty"` } func (x *JWTSVID) Reset() { @@ -477,6 +494,13 @@ func (x *JWTSVID) GetSvid() string { return "" } +func (x *JWTSVID) GetHint() string { + if x != nil { + return x.Hint + } + return "" +} + // The JWTBundlesRequest message conveys parameters for requesting JWT bundles. // There are currently no such parameters. type JWTBundlesRequest struct { @@ -709,7 +733,7 @@ var file_workload_proto_rawDesc = []byte{ 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x80, 0x01, 0x0a, 0x08, 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x94, 0x01, 0x0a, 0x08, 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x73, 0x76, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, @@ -717,76 +741,79 @@ var file_workload_proto_rawDesc = []byte{ 0x35, 0x30, 0x39, 0x5f, 0x73, 0x76, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x78, 0x35, 0x30, 0x39, 0x53, 0x76, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x58, 0x35, 0x30, 0x39, 0x42, - 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa0, 0x01, - 0x0a, 0x13, 0x58, 0x35, 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0c, 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x75, 0x6e, 0x64, 0x6c, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x42, - 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, - 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x75, 0x6e, - 0x64, 0x6c, 0x65, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x49, 0x0a, 0x0e, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x22, 0x31, 0x0a, 0x0f, 0x4a, - 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, - 0x0a, 0x05, 0x73, 0x76, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, - 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x05, 0x73, 0x76, 0x69, 0x64, 
0x73, 0x22, 0x3a, - 0x0a, 0x07, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x69, - 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x70, - 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x76, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x76, 0x69, 0x64, 0x22, 0x13, 0x0a, 0x11, 0x4a, 0x57, - 0x54, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x8c, 0x01, 0x0a, 0x12, 0x4a, 0x57, 0x54, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x4a, 0x57, 0x54, 0x42, 0x75, 0x6e, - 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x75, 0x6e, - 0x64, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x75, 0x6e, 0x64, 0x6c, - 0x65, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x48, - 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, - 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, - 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, - 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x76, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x73, 0x76, 0x69, 0x64, 0x22, 0x67, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, - 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x61, 0x69, 0x6d, - 0x73, 0x32, 0xc3, 0x02, 0x0a, 0x11, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x57, 0x6f, 0x72, 0x6b, - 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x50, 0x49, 0x12, 0x36, 0x0a, 0x0d, 0x46, 0x65, 0x74, 0x63, 0x68, - 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, 0x12, 0x10, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x53, - 0x56, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x58, 0x35, 0x30, - 0x39, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, - 0x3f, 0x0a, 0x10, 0x46, 0x65, 0x74, 0x63, 0x68, 0x58, 0x35, 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, - 0x6c, 0x65, 0x73, 0x12, 0x13, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x42, - 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, - 0x12, 0x31, 0x0a, 0x0c, 0x46, 0x65, 0x74, 0x63, 0x68, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, - 0x12, 0x0f, 0x2e, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 
0x1a, 0x10, 0x2e, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0f, 0x46, 0x65, 0x74, 0x63, 0x68, 0x4a, 0x57, 0x54, 0x42, - 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x12, 0x2e, 0x4a, 0x57, 0x54, 0x42, 0x75, 0x6e, 0x64, - 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x4a, 0x57, 0x54, - 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, - 0x01, 0x12, 0x44, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x57, 0x54, - 0x53, 0x56, 0x49, 0x44, 0x12, 0x17, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4a, - 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3f, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x67, 0x6f, 0x2d, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x3b, - 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x69, 0x6e, 0x74, 0x22, 0x14, 0x0a, 0x12, 0x58, + 0x35, 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x58, 0x35, 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x72, 0x6c, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x3b, 0x0a, 0x07, 0x62, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x58, + 0x35, 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x07, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x42, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0e, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, + 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x22, + 0x31, 0x0a, 0x0f, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x05, 0x73, 0x76, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x08, 0x2e, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x05, 0x73, 0x76, 0x69, + 0x64, 0x73, 0x22, 0x4e, 0x0a, 0x07, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x12, 0x1b, 0x0a, + 0x09, 0x73, 0x70, 0x69, 0x66, 
0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x76, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x76, 0x69, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x69, + 0x6e, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x4a, 0x57, 0x54, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x8c, 0x01, 0x0a, 0x12, 0x4a, 0x57, 0x54, 0x42, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, + 0x0a, 0x07, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x4a, 0x57, 0x54, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x07, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x42, 0x75, + 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x48, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x73, 0x76, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x76, 0x69, 0x64, + 0x22, 0x67, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x57, 0x54, 0x53, + 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, + 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6c, 0x61, 0x69, + 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x52, 0x06, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x32, 0xc3, 0x02, 0x0a, 0x11, 0x53, 0x70, + 0x69, 0x66, 0x66, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x50, 0x49, 0x12, + 0x36, 0x0a, 0x0d, 0x46, 0x65, 0x74, 0x63, 0x68, 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, + 0x12, 0x10, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x3f, 0x0a, 0x10, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x58, 0x35, 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x13, 0x2e, 0x58, 0x35, + 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x14, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x31, 0x0a, 0x0c, 0x46, 0x65, 0x74, 0x63, + 0x68, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x12, 0x0f, 0x2e, 0x4a, 0x57, 0x54, 0x53, 0x56, + 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x10, 0x2e, 0x4a, 0x57, 0x54, 0x53, + 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0f, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x4a, 0x57, 0x54, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x12, + 0x2e, 0x4a, 0x57, 0x54, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x4a, 0x57, 0x54, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x44, 0x0a, 0x0f, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x12, 0x17, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, + 0x3f, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, + 0x69, 0x66, 0x66, 0x65, 0x2f, 0x67, 0x6f, 0x2d, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x76, + 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x77, + 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x3b, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.proto b/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.proto index 64b66c8b6f..b9b8db0153 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.proto +++ b/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.proto @@ -74,6 +74,12 @@ message X509SVID { // Required. ASN.1 DER encoded X.509 bundle for the trust domain. bytes bundle = 4; + + // Optional. An operator-specified string used to provide guidance on how this + // identity should be used by a workload when more than one SVID is returned. + // For example, `internal` and `external` to indicate an SVID for internal or + // external use, respectively. + string hint = 5; } // The X509BundlesRequest message conveys parameters for requesting X.509 @@ -115,6 +121,12 @@ message JWTSVID { // Required. Encoded JWT using JWS Compact Serialization. string svid = 2; + + // Optional. An operator-specified string used to provide guidance on how this + // identity should be used by a workload when more than one SVID is returned. + // For example, `internal` and `external` to indicate an SVID for internal or + // external use, respectively. + string hint = 3; } // The JWTBundlesRequest message conveys parameters for requesting JWT bundles. 
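The hint field introduced above carries through to the SVID types later in this patch (x509svid.SVID and jwtsvid.SVID each gain a Hint string, and the Workload API client keeps only the first SVID for any repeated hint value). As a rough illustration of how a consumer might act on it — the selectByHint helper, the example package name, and the fallback-to-first behaviour are assumptions for this sketch, only the Hint field itself comes from the change:

    package example

    import (
        "github.com/spiffe/go-spiffe/v2/svid/x509svid"
    )

    // selectByHint picks the SVID whose operator-assigned Hint matches want
    // (for example "internal" or "external"), falling back to the first SVID
    // in the list when no hint matches. The Workload API client in this patch
    // already discards later duplicates of the same non-empty hint, so a
    // matching hint identifies at most one SVID here.
    func selectByHint(svids []*x509svid.SVID, want string) *x509svid.SVID {
        if len(svids) == 0 {
            return nil
        }
        for _, s := range svids {
            if s.Hint == want {
                return s
            }
        }
        return svids[0]
    }

A workload that fetched several identities could then, say, ask for the one hinted "internal" and fall back to its default SVID otherwise; this is only a sketch of intended usage, not part of the vendored code below.
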
diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/path.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/path.go index 7c75602c22..d65dc8f012 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/path.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/path.go @@ -23,7 +23,7 @@ func FormatPath(format string, args ...interface{}) (string, error) { func JoinPathSegments(segments ...string) (string, error) { var builder strings.Builder for _, segment := range segments { - if err := validatePathSegment(segment); err != nil { + if err := ValidatePathSegment(segment); err != nil { return "", err } builder.WriteByte('/') @@ -71,9 +71,15 @@ func ValidatePath(path string) error { return nil } -func validatePathSegment(segment string) error { - if segment == "" { +// ValidatePathSegment validates that a string is a conformant segment for +// inclusion in the path for a SPIFFE ID. +// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#22-path +func ValidatePathSegment(segment string) error { + switch segment { + case "": return errEmptySegment + case ".", "..": + return errDotSegment } for i := 0; i < len(segment); i++ { if !isValidPathSegmentChar(segment[i]) { diff --git a/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go b/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go index a3e0fe73f8..ddbfac34f7 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go @@ -4,11 +4,11 @@ import ( "fmt" "time" + "github.com/go-jose/go-jose/v3" + "github.com/go-jose/go-jose/v3/jwt" "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/zeebo/errs" - "gopkg.in/square/go-jose.v2" - "gopkg.in/square/go-jose.v2/jwt" ) var ( @@ -28,6 +28,9 @@ type SVID struct { Expiry time.Time // Claims is the parsed claims from token Claims map[string]interface{} + // Hint is an operator-specified string used to provide guidance on how this + // identity should be used by a workload when more than one SVID is returned. + Hint string // token is the serialized JWT token token string diff --git a/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go b/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go index 5fecffe8fb..4ac51dae68 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go @@ -26,6 +26,10 @@ type SVID struct { // PrivateKey is the private key for the X509-SVID. PrivateKey crypto.Signer + + // Hint is an operator-specified string used to provide guidance on how this + // identity should be used by a workload when more than one SVID is returned. + Hint string } // Load loads the X509-SVID from PEM encoded files on disk. certFile and diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go index 3328a98fb2..7a9685cf36 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go @@ -426,7 +426,7 @@ func parseX509Context(resp *workload.X509SVIDResponse) (*X509Context, error) { // parseX509SVIDs parses one or all of the SVIDs in the response. If firstOnly // is true, then only the first SVID in the response is parsed and returned. -// Otherwise all SVIDs are parsed and returned. +// Otherwise, all SVIDs are parsed and returned. 
func parseX509SVIDs(resp *workload.X509SVIDResponse, firstOnly bool) ([]*x509svid.SVID, error) { n := len(resp.Svids) if n == 0 { @@ -436,10 +436,20 @@ func parseX509SVIDs(resp *workload.X509SVIDResponse, firstOnly bool) ([]*x509svi n = 1 } + hints := make(map[string]struct{}, n) svids := make([]*x509svid.SVID, 0, n) for i := 0; i < n; i++ { svid := resp.Svids[i] + // In the event of more than one X509SVID message with the same hint value set, then the first message in the + // list SHOULD be selected. + if _, ok := hints[svid.Hint]; ok && svid.Hint != "" { + continue + } + + hints[svid.Hint] = struct{}{} + s, err := x509svid.ParseRaw(svid.X509Svid, svid.X509SvidKey) + s.Hint = svid.Hint if err != nil { return nil, err } @@ -506,7 +516,7 @@ func parseX509BundlesResponse(resp *workload.X509BundlesResponse) (*x509bundle.S // parseJWTSVIDs parses one or all of the SVIDs in the response. If firstOnly // is true, then only the first SVID in the response is parsed and returned. -// Otherwise all SVIDs are parsed and returned. +// Otherwise, all SVIDs are parsed and returned. func parseJWTSVIDs(resp *workload.JWTSVIDResponse, audience []string, firstOnly bool) ([]*jwtsvid.SVID, error) { n := len(resp.Svids) if n == 0 { @@ -516,10 +526,19 @@ func parseJWTSVIDs(resp *workload.JWTSVIDResponse, audience []string, firstOnly n = 1 } + hints := make(map[string]struct{}, n) svids := make([]*jwtsvid.SVID, 0, n) for i := 0; i < n; i++ { svid := resp.Svids[i] + // In the event of more than one X509SVID message with the same hint value set, then the first message in the + // list SHOULD be selected. + if _, ok := hints[svid.Hint]; ok && svid.Hint != "" { + continue + } + hints[svid.Hint] = struct{}{} + s, err := jwtsvid.ParseInsecure(svid.Svid, audience) + s.Hint = svid.Hint if err != nil { return nil, err } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_bucket.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_bucket.go deleted file mode 100644 index abdfc6e7d8..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_bucket.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "os" - - corev1 "k8s.io/api/core/v1" -) - -const ( - // BucketLocationKey is the name of the configmap entry that specifies - // loction of the bucket. - BucketLocationKey = "location" - - // BucketServiceAccountSecretNameKey is the name of the configmap entry that specifies - // the name of the secret that will provide the servie account with bucket access. 
- // This secret must have a key called serviceaccount that will have a value with - // the service account with access to the bucket - BucketServiceAccountSecretNameKey = "bucket.service.account.secret.name" - - // BucketServiceAccountSecretKeyKey is the name of the configmap entry that specifies - // the secret key that will have a value with the service account json with access - // to the bucket - BucketServiceAccountSecretKeyKey = "bucket.service.account.secret.key" - - // DefaultBucketServiceFieldName defaults to a gcs bucket - DefaultBucketServiceFieldName = "GOOGLE_APPLICATION_CREDENTIALS" - - // BucketServiceAccountFieldNameKey is the name of the configmap entry that specifies - // the field name that should be used for the service account. - // Valid values: GOOGLE_APPLICATION_CREDENTIALS, BOTO_CONFIG. - BucketServiceAccountFieldNameKey = "bucket.service.account.field.name" -) - -// ArtifactBucket holds the configurations for the artifacts PVC -// +k8s:deepcopy-gen=true -type ArtifactBucket struct { - Location string - ServiceAccountSecretName string - ServiceAccountSecretKey string - ServiceAccountFieldName string -} - -// GetArtifactBucketConfigName returns the name of the configmap containing all -// customizations for the storage bucket. -func GetArtifactBucketConfigName() string { - if e := os.Getenv("CONFIG_ARTIFACT_BUCKET_NAME"); e != "" { - return e - } - return "config-artifact-bucket" -} - -// Equals returns true if two Configs are identical -func (cfg *ArtifactBucket) Equals(other *ArtifactBucket) bool { - if cfg == nil && other == nil { - return true - } - - if cfg == nil || other == nil { - return false - } - - return other.Location == cfg.Location && - other.ServiceAccountSecretName == cfg.ServiceAccountSecretName && - other.ServiceAccountSecretKey == cfg.ServiceAccountSecretKey && - other.ServiceAccountFieldName == cfg.ServiceAccountFieldName -} - -// NewArtifactBucketFromMap returns a Config given a map corresponding to a ConfigMap -func NewArtifactBucketFromMap(cfgMap map[string]string) (*ArtifactBucket, error) { - tc := ArtifactBucket{ - ServiceAccountFieldName: DefaultBucketServiceFieldName, - } - - if location, ok := cfgMap[BucketLocationKey]; ok { - tc.Location = location - } - - if serviceAccountSecretName, ok := cfgMap[BucketServiceAccountSecretNameKey]; ok { - tc.ServiceAccountSecretName = serviceAccountSecretName - } - - if serviceAccountSecretKey, ok := cfgMap[BucketServiceAccountSecretKeyKey]; ok { - tc.ServiceAccountSecretKey = serviceAccountSecretKey - } - - if serviceAccountFieldName, ok := cfgMap[BucketServiceAccountFieldNameKey]; ok { - tc.ServiceAccountFieldName = serviceAccountFieldName - } - - return &tc, nil -} - -// NewArtifactBucketFromConfigMap returns a Config for the given configmap -func NewArtifactBucketFromConfigMap(config *corev1.ConfigMap) (*ArtifactBucket, error) { - return NewArtifactBucketFromMap(config.Data) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_pvc.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_pvc.go deleted file mode 100644 index 5434353d39..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_pvc.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "os" - - corev1 "k8s.io/api/core/v1" -) - -const ( - // DefaultPVCSize is the default size of the PVC to create - DefaultPVCSize = "5Gi" - - // PVCSizeKey is the name of the configmap entry that specifies the size of the PVC to create - PVCSizeKey = "size" - - // PVCStorageClassNameKey is the name of the configmap entry that specifies the storage class of the PVC to create - PVCStorageClassNameKey = "storageClassName" -) - -// ArtifactPVC holds the configurations for the artifacts PVC -// +k8s:deepcopy-gen=true -type ArtifactPVC struct { - Size string - StorageClassName string -} - -// GetArtifactPVCConfigName returns the name of the configmap containing all -// customizations for the storage PVC. -func GetArtifactPVCConfigName() string { - if e := os.Getenv("CONFIG_ARTIFACT_PVC_NAME"); e != "" { - return e - } - return "config-artifact-pvc" -} - -// Equals returns true if two Configs are identical -func (cfg *ArtifactPVC) Equals(other *ArtifactPVC) bool { - if cfg == nil && other == nil { - return true - } - - if cfg == nil || other == nil { - return false - } - - return other.Size == cfg.Size && - other.StorageClassName == cfg.StorageClassName -} - -// NewArtifactPVCFromMap returns a Config given a map corresponding to a ConfigMap -func NewArtifactPVCFromMap(cfgMap map[string]string) (*ArtifactPVC, error) { - tc := ArtifactPVC{ - Size: DefaultPVCSize, - } - - if size, ok := cfgMap[PVCSizeKey]; ok { - tc.Size = size - } - - if storageClassName, ok := cfgMap[PVCStorageClassNameKey]; ok { - tc.StorageClassName = storageClassName - } - - return &tc, nil -} - -// NewArtifactPVCFromConfigMap returns a Config for the given configmap -func NewArtifactPVCFromConfigMap(config *corev1.ConfigMap) (*ArtifactPVC, error) { - return NewArtifactPVCFromMap(config.Data) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go index d17e707dcf..968dae25e8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go @@ -44,6 +44,8 @@ const ( DefaultCloudEventSinkValue = "" // DefaultMaxMatrixCombinationsCount is used when no max matrix combinations count is specified. 
DefaultMaxMatrixCombinationsCount = 256 + // DefaultResolverTypeValue is used when no default resolver type is specified + DefaultResolverTypeValue = "" defaultTimeoutMinutesKey = "default-timeout-minutes" defaultServiceAccountKey = "default-service-account" @@ -54,6 +56,7 @@ const ( defaultTaskRunWorkspaceBinding = "default-task-run-workspace-binding" defaultMaxMatrixCombinationsCountKey = "default-max-matrix-combinations-count" defaultForbiddenEnv = "default-forbidden-env" + defaultResolverTypeKey = "default-resolver-type" ) // Defaults holds the default configurations @@ -68,6 +71,7 @@ type Defaults struct { DefaultTaskRunWorkspaceBinding string DefaultMaxMatrixCombinationsCount int DefaultForbiddenEnv []string + DefaultResolverType string } // GetDefaultsConfigName returns the name of the configmap containing all @@ -97,6 +101,7 @@ func (cfg *Defaults) Equals(other *Defaults) bool { other.DefaultCloudEventsSink == cfg.DefaultCloudEventsSink && other.DefaultTaskRunWorkspaceBinding == cfg.DefaultTaskRunWorkspaceBinding && other.DefaultMaxMatrixCombinationsCount == cfg.DefaultMaxMatrixCombinationsCount && + other.DefaultResolverType == cfg.DefaultResolverType && reflect.DeepEqual(other.DefaultForbiddenEnv, cfg.DefaultForbiddenEnv) } @@ -108,6 +113,7 @@ func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) { DefaultManagedByLabelValue: DefaultManagedByLabelValue, DefaultCloudEventsSink: DefaultCloudEventSinkValue, DefaultMaxMatrixCombinationsCount: DefaultMaxMatrixCombinationsCount, + DefaultResolverType: DefaultResolverTypeValue, } if defaultTimeoutMin, ok := cfgMap[defaultTimeoutMinutesKey]; ok { @@ -166,6 +172,10 @@ func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) { tc.DefaultForbiddenEnv = tmpString.List() } + if defaultResolverType, ok := cfgMap[defaultResolverTypeKey]; ok { + tc.DefaultResolverType = defaultResolverType + } + return &tc, nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go index 305200b7fe..699a65516e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go @@ -33,24 +33,19 @@ const ( AlphaAPIFields = "alpha" // BetaAPIFields is the value used for "enable-api-fields" when beta APIs should be usable as well. 
BetaAPIFields = "beta" - // EnforceResourceVerificationMode is the value used for "resource-verification-mode" when verification is applied and fail the - // TaskRun or PipelineRun when verification fails - EnforceResourceVerificationMode = "enforce" - // WarnResourceVerificationMode is the value used for "resource-verification-mode" when verification is applied but only log - // the warning when verification fails - WarnResourceVerificationMode = "warn" - // SkipResourceVerificationMode is the value used for "resource-verification-mode" when verification is skipped - SkipResourceVerificationMode = "skip" + // FailNoMatchPolicy is the value used for "trusted-resources-verification-no-match-policy" to fail TaskRun or PipelineRun + // when no matching policies are found + FailNoMatchPolicy = "fail" + // WarnNoMatchPolicy is the value used for "trusted-resources-verification-no-match-policy" to log warning and skip verification + // when no matching policies are found + WarnNoMatchPolicy = "warn" + // IgnoreNoMatchPolicy is the value used for "trusted-resources-verification-no-match-policy" to skip verification + // when no matching policies are found + IgnoreNoMatchPolicy = "ignore" // ResultExtractionMethodTerminationMessage is the value used for "results-from" as a way to extract results from tasks using kubernetes termination message. ResultExtractionMethodTerminationMessage = "termination-message" // ResultExtractionMethodSidecarLogs is the value used for "results-from" as a way to extract results from tasks using sidecar logs. ResultExtractionMethodSidecarLogs = "sidecar-logs" - // CustomTaskVersionAlpha is the value used for "custom-task-version" when the PipelineRun reconciler should create - // v1alpha1.Runs. - CustomTaskVersionAlpha = "v1alpha1" - // CustomTaskVersionBeta is the value used for "custom-task-version" when the PipelineRun reconciler should create - // v1beta1.CustomRuns. - CustomTaskVersionBeta = "v1beta1" // DefaultDisableAffinityAssistant is the default value for "disable-affinity-assistant". DefaultDisableAffinityAssistant = false // DefaultDisableCredsInit is the default value for "disable-creds-init". @@ -73,35 +68,34 @@ const ( EnforceNonfalsifiabilityNone = "" // DefaultEnforceNonfalsifiability is the default value for "enforce-nonfalsifiability". DefaultEnforceNonfalsifiability = EnforceNonfalsifiabilityNone - // DefaultResourceVerificationMode is the default value for "resource-verification-mode". - DefaultResourceVerificationMode = SkipResourceVerificationMode + // DefaultNoMatchPolicyConfig is the default value for "trusted-resources-verification-no-match-policy". + DefaultNoMatchPolicyConfig = IgnoreNoMatchPolicy // DefaultEnableProvenanceInStatus is the default value for "enable-provenance-status". 
DefaultEnableProvenanceInStatus = false // DefaultResultExtractionMethod is the default value for ResultExtractionMethod DefaultResultExtractionMethod = ResultExtractionMethodTerminationMessage // DefaultMaxResultSize is the default value in bytes for the size of a result DefaultMaxResultSize = 4096 - // DefaultCustomTaskVersion is the default value for "custom-task-version" - DefaultCustomTaskVersion = CustomTaskVersionBeta disableAffinityAssistantKey = "disable-affinity-assistant" disableCredsInitKey = "disable-creds-init" runningInEnvWithInjectedSidecarsKey = "running-in-environment-with-injected-sidecars" awaitSidecarReadinessKey = "await-sidecar-readiness" - requireGitSSHSecretKnownHostsKey = "require-git-ssh-secret-known-hosts" // nolint: gosec + requireGitSSHSecretKnownHostsKey = "require-git-ssh-secret-known-hosts" //nolint:gosec enableTektonOCIBundles = "enable-tekton-oci-bundles" enableAPIFields = "enable-api-fields" sendCloudEventsForRuns = "send-cloudevents-for-runs" enforceNonfalsifiability = "enforce-nonfalsifiability" - verificationMode = "resource-verification-mode" + verificationNoMatchPolicy = "trusted-resources-verification-no-match-policy" enableProvenanceInStatus = "enable-provenance-in-status" resultExtractionMethod = "results-from" maxResultSize = "max-result-size" - customTaskVersion = "custom-task-version" ) // FeatureFlags holds the features configurations // +k8s:deepcopy-gen=true +// +//nolint:musttag type FeatureFlags struct { DisableAffinityAssistant bool DisableCredsInit bool @@ -113,11 +107,15 @@ type FeatureFlags struct { SendCloudEventsForRuns bool AwaitSidecarReadiness bool EnforceNonfalsifiability string - ResourceVerificationMode string - EnableProvenanceInStatus bool - ResultExtractionMethod string - MaxResultSize int - CustomTaskVersion string + // VerificationNoMatchPolicy is the feature flag for "trusted-resources-verification-no-match-policy" + // VerificationNoMatchPolicy can be set to "ignore", "warn" and "fail" values. 
+ // ignore: skip trusted resources verification when no matching verification policies found + // warn: skip trusted resources verification when no matching verification policies found and log a warning + // fail: fail the taskrun or pipelines run if no matching verification policies found + VerificationNoMatchPolicy string + EnableProvenanceInStatus bool + ResultExtractionMethod string + MaxResultSize int } // GetFeatureFlagsConfigName returns the name of the configmap containing all @@ -129,29 +127,13 @@ func GetFeatureFlagsConfigName() string { return "feature-flags" } -func getEnforceNonfalsifiabilityFeature(cfgMap map[string]string) (string, error) { - var mapValue struct{} - var acceptedValues = map[string]struct{}{ - EnforceNonfalsifiabilityNone: mapValue, - EnforceNonfalsifiabilityWithSpire: mapValue, - } - var value = DefaultEnforceNonfalsifiability - if cfg, ok := cfgMap[enforceNonfalsifiability]; ok { - value = strings.ToLower(cfg) - } - if _, ok := acceptedValues[value]; !ok { - return DefaultEnforceNonfalsifiability, fmt.Errorf("invalid value for feature flag %q: %q", enforceNonfalsifiability, value) - } - return value, nil -} - // NewFeatureFlagsFromMap returns a Config given a map corresponding to a ConfigMap func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { setFeature := func(key string, defaultValue bool, feature *bool) error { if cfg, ok := cfgMap[key]; ok { value, err := strconv.ParseBool(cfg) if err != nil { - return fmt.Errorf("failed parsing feature flags config %q: %v", cfg, err) + return fmt.Errorf("failed parsing feature flags config %q: %w", cfg, err) } *feature = value return nil @@ -182,7 +164,7 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setFeature(sendCloudEventsForRuns, DefaultSendCloudEventsForRuns, &tc.SendCloudEventsForRuns); err != nil { return nil, err } - if err := setResourceVerificationMode(cfgMap, DefaultResourceVerificationMode, &tc.ResourceVerificationMode); err != nil { + if err := setVerificationNoMatchPolicy(cfgMap, DefaultNoMatchPolicyConfig, &tc.VerificationNoMatchPolicy); err != nil { return nil, err } if err := setFeature(enableProvenanceInStatus, DefaultEnableProvenanceInStatus, &tc.EnableProvenanceInStatus); err != nil { @@ -194,7 +176,7 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setMaxResultSize(cfgMap, DefaultMaxResultSize, &tc.MaxResultSize); err != nil { return nil, err } - if err := setCustomTaskVersion(cfgMap, DefaultCustomTaskVersion, &tc.CustomTaskVersion); err != nil { + if err := setEnforceNonFalsifiability(cfgMap, tc.EnableAPIFields, &tc.EnforceNonfalsifiability); err != nil { return nil, err } @@ -206,21 +188,10 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { // defeat the purpose of having a single shared gate for all alpha features. if tc.EnableAPIFields == AlphaAPIFields { tc.EnableTektonOCIBundles = true - // Only consider SPIRE if alpha is on. - enforceNonfalsifiabilityValue, err := getEnforceNonfalsifiabilityFeature(cfgMap) - if err != nil { - return nil, err - } - tc.EnforceNonfalsifiability = enforceNonfalsifiabilityValue } else { if err := setFeature(enableTektonOCIBundles, DefaultEnableTektonOciBundles, &tc.EnableTektonOCIBundles); err != nil { return nil, err } - // Do not enable any form of non-falsifiability enforcement in non-alpha mode. 
- tc.EnforceNonfalsifiability = EnforceNonfalsifiabilityNone - if enforceNonfalsifiabilityValue, err := getEnforceNonfalsifiabilityFeature(cfgMap); err != nil || enforceNonfalsifiabilityValue != DefaultEnforceNonfalsifiability { - return nil, fmt.Errorf("%q can be set to non-default values (%q) only in alpha", enforceNonfalsifiability, enforceNonfalsifiabilityValue) - } } return &tc, nil } @@ -241,34 +212,47 @@ func setEnabledAPIFields(cfgMap map[string]string, defaultValue string, feature return nil } -// setResultExtractionMethod sets the "results-from" flag based on the content of a given map. -// If the feature gate is invalid or missing then an error is returned. -func setResultExtractionMethod(cfgMap map[string]string, defaultValue string, feature *string) error { - value := defaultValue - if cfg, ok := cfgMap[resultExtractionMethod]; ok { +// setEnforceNonFalsifiability sets the "enforce-nonfalsifiability" flag based on the content of a given map. +// If the feature gate is invalid, then an error is returned. +func setEnforceNonFalsifiability(cfgMap map[string]string, enableAPIFields string, feature *string) error { + var value = DefaultEnforceNonfalsifiability + if cfg, ok := cfgMap[enforceNonfalsifiability]; ok { value = strings.ToLower(cfg) } + + // validate that "enforce-nonfalsifiability" is set to a valid value switch value { - case ResultExtractionMethodTerminationMessage, ResultExtractionMethodSidecarLogs: + case EnforceNonfalsifiabilityNone, EnforceNonfalsifiabilityWithSpire: + break + default: + return fmt.Errorf("invalid value for feature flag %q: %q", enforceNonfalsifiability, value) + } + + // validate that "enforce-nonfalsifiability" is set to allowed values for stability level + switch enableAPIFields { + case AlphaAPIFields: *feature = value default: - return fmt.Errorf("invalid value for feature flag %q: %q", resultExtractionMethod, value) + // Do not consider any form of non-falsifiability enforcement in non-alpha mode + if value != DefaultEnforceNonfalsifiability { + return fmt.Errorf("%q can be set to non-default values (%q) only in alpha", enforceNonfalsifiability, value) + } } return nil } -// setCustomTaskVersion sets the "custom-task-version" flag based on the content of a given map. +// setResultExtractionMethod sets the "results-from" flag based on the content of a given map. // If the feature gate is invalid or missing then an error is returned. -func setCustomTaskVersion(cfgMap map[string]string, defaultValue string, feature *string) error { +func setResultExtractionMethod(cfgMap map[string]string, defaultValue string, feature *string) error { value := defaultValue - if cfg, ok := cfgMap[customTaskVersion]; ok { + if cfg, ok := cfgMap[resultExtractionMethod]; ok { value = strings.ToLower(cfg) } switch value { - case CustomTaskVersionAlpha, CustomTaskVersionBeta: + case ResultExtractionMethodTerminationMessage, ResultExtractionMethodSidecarLogs: *feature = value default: - return fmt.Errorf("invalid value for feature flag %q: %q", customTaskVersion, value) + return fmt.Errorf("invalid value for feature flag %q: %q", resultExtractionMethod, value) } return nil } @@ -286,24 +270,24 @@ func setMaxResultSize(cfgMap map[string]string, defaultValue int, feature *int) } // if max limit is > 1.5 MB (CRD limit). if value >= 1572864 { - return fmt.Errorf("invalid value for feature flag %q: %q. This is exceeding the CRD limit", resultExtractionMethod, value) + return fmt.Errorf("invalid value for feature flag %q: %q. 
This is exceeding the CRD limit", resultExtractionMethod, fmt.Sprint(value)) } *feature = value return nil } -// setResourceVerificationMode sets the "resource-verification-mode" flag based on the content of a given map. +// setVerificationNoMatchPolicy sets the "trusted-resources-verification-no-match-policy" flag based on the content of a given map. // If the value is invalid or missing then an error is returned. -func setResourceVerificationMode(cfgMap map[string]string, defaultValue string, feature *string) error { +func setVerificationNoMatchPolicy(cfgMap map[string]string, defaultValue string, feature *string) error { value := defaultValue - if cfg, ok := cfgMap[verificationMode]; ok { + if cfg, ok := cfgMap[verificationNoMatchPolicy]; ok { value = strings.ToLower(cfg) } switch value { - case EnforceResourceVerificationMode, WarnResourceVerificationMode, SkipResourceVerificationMode: + case FailNoMatchPolicy, WarnNoMatchPolicy, IgnoreNoMatchPolicy: *feature = value default: - return fmt.Errorf("invalid value for feature flag %q: %q", verificationMode, value) + return fmt.Errorf("invalid value for feature flag %q: %q", verificationNoMatchPolicy, value) } return nil } @@ -328,18 +312,9 @@ func EnableStableAPIFields(ctx context.Context) context.Context { return setEnableAPIFields(ctx, StableAPIFields) } -// CheckEnforceResourceVerificationMode returns true if the ResourceVerificationMode is EnforceResourceVerificationMode -// else returns false -func CheckEnforceResourceVerificationMode(ctx context.Context) bool { - cfg := FromContextOrDefaults(ctx) - return cfg.FeatureFlags.ResourceVerificationMode == EnforceResourceVerificationMode -} - -// CheckWarnResourceVerificationMode returns true if the ResourceVerificationMode is WarnResourceVerificationMode -// else returns false -func CheckWarnResourceVerificationMode(ctx context.Context) bool { - cfg := FromContextOrDefaults(ctx) - return cfg.FeatureFlags.ResourceVerificationMode == WarnResourceVerificationMode +// GetVerificationNoMatchPolicy returns the "trusted-resources-verification-no-match-policy" value +func GetVerificationNoMatchPolicy(ctx context.Context) string { + return FromContextOrDefaults(ctx).FeatureFlags.VerificationNoMatchPolicy } // CheckAlphaOrBetaAPIFields return true if the enable-api-fields is either set to alpha or set to beta @@ -348,6 +323,11 @@ func CheckAlphaOrBetaAPIFields(ctx context.Context) bool { return cfg.FeatureFlags.EnableAPIFields == AlphaAPIFields || cfg.FeatureFlags.EnableAPIFields == BetaAPIFields } +// IsSpireEnabled checks if non-falsifiable provenance is enforced through SPIRE +func IsSpireEnabled(ctx context.Context) bool { + return FromContextOrDefaults(ctx).FeatureFlags.EnforceNonfalsifiability == EnforceNonfalsifiabilityWithSpire +} + func setEnableAPIFields(ctx context.Context, want string) context.Context { featureFlags, _ := NewFeatureFlagsFromMap(map[string]string{ "enable-api-fields": want, diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go index 5143d3e0b7..9cb15bdf0b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go @@ -28,13 +28,10 @@ type cfgKey struct{} // Config holds the collection of configurations that we attach to contexts. 
// +k8s:deepcopy-gen=false type Config struct { - Defaults *Defaults - FeatureFlags *FeatureFlags - ArtifactBucket *ArtifactBucket - ArtifactPVC *ArtifactPVC - Metrics *Metrics - TrustedResources *TrustedResources - SpireConfig *sc.SpireConfig + Defaults *Defaults + FeatureFlags *FeatureFlags + Metrics *Metrics + SpireConfig *sc.SpireConfig } // FromContext extracts a Config from the provided context. @@ -54,20 +51,14 @@ func FromContextOrDefaults(ctx context.Context) *Config { } defaults, _ := NewDefaultsFromMap(map[string]string{}) featureFlags, _ := NewFeatureFlagsFromMap(map[string]string{}) - artifactBucket, _ := NewArtifactBucketFromMap(map[string]string{}) - artifactPVC, _ := NewArtifactPVCFromMap(map[string]string{}) metrics, _ := newMetricsFromMap(map[string]string{}) - trustedresources, _ := NewTrustedResourcesConfigFromMap(map[string]string{}) spireconfig, _ := NewSpireConfigFromMap(map[string]string{}) return &Config{ - Defaults: defaults, - FeatureFlags: featureFlags, - ArtifactBucket: artifactBucket, - ArtifactPVC: artifactPVC, - Metrics: metrics, - TrustedResources: trustedresources, - SpireConfig: spireconfig, + Defaults: defaults, + FeatureFlags: featureFlags, + Metrics: metrics, + SpireConfig: spireconfig, } } @@ -90,13 +81,10 @@ func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value i "defaults/features/artifacts", logger, configmap.Constructors{ - GetDefaultsConfigName(): NewDefaultsFromConfigMap, - GetFeatureFlagsConfigName(): NewFeatureFlagsFromConfigMap, - GetArtifactBucketConfigName(): NewArtifactBucketFromConfigMap, - GetArtifactPVCConfigName(): NewArtifactPVCFromConfigMap, - GetMetricsConfigName(): NewMetricsFromConfigMap, - GetTrustedResourcesConfigName(): NewTrustedResourcesConfigFromConfigMap, - GetSpireConfigName(): NewSpireConfigFromConfigMap, + GetDefaultsConfigName(): NewDefaultsFromConfigMap, + GetFeatureFlagsConfigName(): NewFeatureFlagsFromConfigMap, + GetMetricsConfigName(): NewMetricsFromConfigMap, + GetSpireConfigName(): NewSpireConfigFromConfigMap, }, onAfterStore..., ), @@ -120,35 +108,20 @@ func (s *Store) Load() *Config { if featureFlags == nil { featureFlags, _ = NewFeatureFlagsFromMap(map[string]string{}) } - artifactBucket := s.UntypedLoad(GetArtifactBucketConfigName()) - if artifactBucket == nil { - artifactBucket, _ = NewArtifactBucketFromMap(map[string]string{}) - } - artifactPVC := s.UntypedLoad(GetArtifactPVCConfigName()) - if artifactPVC == nil { - artifactPVC, _ = NewArtifactPVCFromMap(map[string]string{}) - } - metrics := s.UntypedLoad(GetMetricsConfigName()) if metrics == nil { metrics, _ = newMetricsFromMap(map[string]string{}) } - trustedresources := s.UntypedLoad(GetTrustedResourcesConfigName()) - if trustedresources == nil { - trustedresources, _ = NewTrustedResourcesConfigFromMap(map[string]string{}) - } + spireconfig := s.UntypedLoad(GetSpireConfigName()) if spireconfig == nil { spireconfig, _ = NewSpireConfigFromMap(map[string]string{}) } return &Config{ - Defaults: defaults.(*Defaults).DeepCopy(), - FeatureFlags: featureFlags.(*FeatureFlags).DeepCopy(), - ArtifactBucket: artifactBucket.(*ArtifactBucket).DeepCopy(), - ArtifactPVC: artifactPVC.(*ArtifactPVC).DeepCopy(), - Metrics: metrics.(*Metrics).DeepCopy(), - TrustedResources: trustedresources.(*TrustedResources).DeepCopy(), - SpireConfig: spireconfig.(*sc.SpireConfig).DeepCopy(), + Defaults: defaults.(*Defaults).DeepCopy(), + FeatureFlags: featureFlags.(*FeatureFlags).DeepCopy(), + Metrics: metrics.(*Metrics).DeepCopy(), + SpireConfig: 
spireconfig.(*sc.SpireConfig).DeepCopy(), } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/trusted_resources.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/trusted_resources.go deleted file mode 100644 index 778e3ad816..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/trusted_resources.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2022 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - "os" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - - cm "knative.dev/pkg/configmap" -) - -// TrustedResources holds the collection of configurations that we attach to contexts. -// Configmap named with "config-trusted-resources" where cosign pub key path and -// KMS pub key path can be configured -// Deprecated. -// +k8s:deepcopy-gen=true -type TrustedResources struct { - // Keys defines the name of the key in configmap data - Keys sets.String -} - -const ( - // DefaultPublicKeyPath is the default path of public key - DefaultPublicKeyPath = "" - // PublicKeys is the name of the public key keyref in configmap data - PublicKeys = "publickeys" - // TrustedTaskConfig is the name of the trusted resources configmap - TrustedTaskConfig = "config-trusted-resources" -) - -// NewTrustedResourcesConfigFromMap creates a Config from the supplied map -func NewTrustedResourcesConfigFromMap(data map[string]string) (*TrustedResources, error) { - cfg := &TrustedResources{ - Keys: sets.NewString(DefaultPublicKeyPath), - } - if err := cm.Parse(data, - cm.AsStringSet(PublicKeys, &cfg.Keys), - ); err != nil { - return nil, fmt.Errorf("failed to parse data: %w", err) - } - return cfg, nil -} - -// NewTrustedResourcesConfigFromConfigMap creates a Config from the supplied ConfigMap -func NewTrustedResourcesConfigFromConfigMap(configMap *corev1.ConfigMap) (*TrustedResources, error) { - return NewTrustedResourcesConfigFromMap(configMap.Data) -} - -// GetTrustedResourcesConfigName returns the name of TrustedResources ConfigMap -func GetTrustedResourcesConfigName() string { - if e := os.Getenv("CONFIG_TRUSTED_RESOURCES_NAME"); e != "" { - return e - } - return TrustedTaskConfig -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go index b6c8c8febd..15d1070d8e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go @@ -23,41 +23,8 @@ package config import ( pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" - sets "k8s.io/apimachinery/pkg/util/sets" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactBucket) DeepCopyInto(out *ArtifactBucket) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactBucket. 
-func (in *ArtifactBucket) DeepCopy() *ArtifactBucket { - if in == nil { - return nil - } - out := new(ArtifactBucket) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactPVC) DeepCopyInto(out *ArtifactPVC) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactPVC. -func (in *ArtifactPVC) DeepCopy() *ArtifactPVC { - if in == nil { - return nil - } - out := new(ArtifactPVC) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Defaults) DeepCopyInto(out *Defaults) { *out = *in @@ -120,26 +87,3 @@ func (in *Metrics) DeepCopy() *Metrics { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TrustedResources) DeepCopyInto(out *TrustedResources) { - *out = *in - if in.Keys != nil { - in, out := &in.Keys, &out.Keys - *out = make(sets.String, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedResources. -func (in *TrustedResources) DeepCopy() *TrustedResources { - if in == nil { - return nil - } - out := new(TrustedResources) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/controller.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/controller.go index dd5669174a..5f15500709 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/controller.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/controller.go @@ -18,11 +18,9 @@ package pipeline const ( // PipelineRunControllerName holds the name of the PipelineRun controller - // nolint: revive PipelineRunControllerName = "PipelineRun" // PipelineControllerName holds the name of the Pipeline controller - // nolint: revive PipelineControllerName = "Pipeline" // TaskRunControllerName holds the name of the TaskRun controller diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/images.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/images.go index f489e9441a..ae3127ca73 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/images.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/images.go @@ -30,16 +30,10 @@ type Images struct { SidecarLogResultsImage string // NopImage is the container image used to kill sidecars. NopImage string - // GitImage is the container image with Git that we use to implement the Git source step. - GitImage string // ShellImage is the container image containing bash shell. ShellImage string // ShellImageWin is the container image containing powershell. ShellImageWin string - // GsutilImage is the container image containing gsutil. - GsutilImage string - // ImageDigestExporterImage is the container image containing our image digest exporter binary. - ImageDigestExporterImage string // WorkingDirInitImage is the container image containing our working dir init binary. 
WorkingDirInitImage string @@ -55,11 +49,8 @@ func (i Images) Validate() error { {i.EntrypointImage, "entrypoint-image"}, {i.SidecarLogResultsImage, "sidecarlogresults-image"}, {i.NopImage, "nop-image"}, - {i.GitImage, "git-image"}, {i.ShellImage, "shell-image"}, {i.ShellImageWin, "shell-image-win"}, - {i.GsutilImage, "gsutil-image"}, - {i.ImageDigestExporterImage, "imagedigest-exporter-image"}, {i.WorkingDirInitImage, "workingdirinit-image"}, } { if f.v == "" { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go index a95b23e72a..0a5ab30547 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go @@ -43,6 +43,9 @@ const ( // RunKey is used as the label identifier for a Run RunKey = GroupName + "/run" + // CustomRunKey is used as the label identifier for a CustomRun + CustomRunKey = GroupName + "/customRun" + // MemberOfLabelKey is used as the label identifier for a PipelineTask // Set to Tasks/Finally depending on the position of the PipelineTask MemberOfLabelKey = GroupName + "/memberOf" @@ -80,9 +83,9 @@ var ( Resource: "pipelineruns", } - // PipelineResourceResource represents a Tekton PipelineResource - PipelineResourceResource = schema.GroupResource{ + // CustomRunResource represents a Tekton CustomRun + CustomRunResource = schema.GroupResource{ Group: GroupName, - Resource: "pipelineresources", + Resource: "customruns", } ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go new file mode 100644 index 0000000000..67fb8a6b81 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go @@ -0,0 +1,362 @@ +/* +Copyright 2023 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + "sort" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "golang.org/x/exp/maps" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/strings/slices" + "knative.dev/pkg/apis" +) + +// Matrix is used to fan out Tasks in a Pipeline +type Matrix struct { + // Params is a list of parameters used to fan out the pipelineTask + // Params takes only `Parameters` of type `"array"` + // Each array element is supplied to the `PipelineTask` by substituting `params` of type `"string"` in the underlying `Task`. + // The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting. + // +listType=atomic + Params Params `json:"params,omitempty"` + + // Include is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix. + // +optional + // +listType=atomic + Include IncludeParamsList `json:"include,omitempty"` +} + +// IncludeParamsList is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix. 
+type IncludeParamsList []IncludeParams + +// IncludeParams allows passing in a specific combinations of Parameters into the Matrix. +type IncludeParams struct { + // Name the specified combination + Name string `json:"name,omitempty"` + + // Params takes only `Parameters` of type `"string"` + // The names of the `params` must match the names of the `params` in the underlying `Task` + // +listType=atomic + Params Params `json:"params,omitempty"` +} + +// Combination is a map, mainly defined to hold a single combination from a Matrix with key as param.Name and value as param.Value +type Combination map[string]string + +// Combinations is a Combination list +type Combinations []Combination + +// FanOut returns an list of params that represent combinations +func (m *Matrix) FanOut() []Params { + var combinations, includeCombinations Combinations + includeCombinations = m.getIncludeCombinations() + if m.HasInclude() && !m.HasParams() { + // If there are only Matrix Include Parameters return explicit combinations + return includeCombinations.toParams() + } + // Generate combinations from Matrix Parameters + for _, parameter := range m.Params { + combinations = combinations.fanOutMatrixParams(parameter) + } + combinations.overwriteCombinations(includeCombinations) + combinations = combinations.addNewCombinations(includeCombinations) + return combinations.toParams() +} + +// overwriteCombinations replaces any missing include params in the initial +// matrix params combinations by overwriting the initial combinations with the +// include combinations +func (cs Combinations) overwriteCombinations(ics Combinations) { + for _, paramCombination := range cs { + for _, includeCombination := range ics { + if paramCombination.contains(includeCombination) { + // overwrite the parameter name and value in existing combination + // with the include combination + for name, val := range includeCombination { + paramCombination[name] = val + } + } + } + } +} + +// addNewCombinations creates a new combination for any include parameter +// values that are missing entirely from the initial combinations and +// returns all combinations +func (cs Combinations) addNewCombinations(ics Combinations) Combinations { + for _, includeCombination := range ics { + if cs.shouldAddNewCombination(includeCombination) { + cs = append(cs, includeCombination) + } + } + return cs +} + +// contains returns true if the include parameter name and value exists in combinations +func (c Combination) contains(includeCombination Combination) bool { + for name, val := range includeCombination { + if _, exist := c[name]; exist { + if c[name] != val { + return false + } + } + } + return true +} + +// shouldAddNewCombination returns true if the include parameter name exists but the value is +// missing from combinations +func (cs Combinations) shouldAddNewCombination(includeCombination map[string]string) bool { + if len(includeCombination) == 0 { + return false + } + for _, paramCombination := range cs { + for name, val := range includeCombination { + if _, exist := paramCombination[name]; exist { + if paramCombination[name] == val { + return false + } + } + } + } + return true +} + +// toParams transforms Combinations from a slice of map[string]string to a slice of Params +// such that, these combinations can be directly consumed in creating taskRun/run object +func (cs Combinations) toParams() []Params { + listOfParams := make([]Params, len(cs)) + for i := range cs { + var params Params + combination := cs[i] + order, _ := 
combination.sortCombination() + for _, key := range order { + params = append(params, Param{ + Name: key, + Value: ParamValue{Type: ParamTypeString, StringVal: combination[key]}, + }) + } + listOfParams[i] = params + } + return listOfParams +} + +// fanOutMatrixParams generates new combinations based on Matrix Parameters. +func (cs Combinations) fanOutMatrixParams(param Param) Combinations { + if len(cs) == 0 { + return initializeCombinations(param) + } + return cs.distribute(param) +} + +// getIncludeCombinations generates combinations based on Matrix Include Parameters +func (m *Matrix) getIncludeCombinations() Combinations { + var combinations Combinations + for i := range m.Include { + includeParams := m.Include[i].Params + newCombination := make(Combination) + for _, param := range includeParams { + newCombination[param.Name] = param.Value.StringVal + } + combinations = append(combinations, newCombination) + } + return combinations +} + +// distribute generates a new Combination of Parameters by adding a new Parameter to an existing list of Combinations. +func (cs Combinations) distribute(param Param) Combinations { + var expandedCombinations Combinations + for _, value := range param.Value.ArrayVal { + for _, combination := range cs { + newCombination := make(Combination) + maps.Copy(newCombination, combination) + newCombination[param.Name] = value + _, orderedCombination := newCombination.sortCombination() + expandedCombinations = append(expandedCombinations, orderedCombination) + } + } + return expandedCombinations +} + +// initializeCombinations generates a new Combination based on the first Parameter in the Matrix. +func initializeCombinations(param Param) Combinations { + var combinations Combinations + for _, value := range param.Value.ArrayVal { + combinations = append(combinations, Combination{param.Name: value}) + } + return combinations +} + +// sortCombination sorts the given Combination based on the Parameter names to produce a deterministic ordering +func (c Combination) sortCombination() ([]string, Combination) { + sortedCombination := make(Combination, len(c)) + order := make([]string, 0, len(c)) + for key := range c { + order = append(order, key) + } + sort.Slice(order, func(i, j int) bool { + return order[i] <= order[j] + }) + for _, key := range order { + sortedCombination[key] = c[key] + } + return order, sortedCombination +} + +// CountCombinations returns the count of Combinations of Parameters generated from the Matrix in PipelineTask. 
+func (m *Matrix) CountCombinations() int { + // Iterate over Matrix Parameters and compute count of all generated Combinations + count := m.countGeneratedCombinationsFromParams() + + // Add any additional Combinations generated from Matrix Include Parameters + count += m.countNewCombinationsFromInclude() + + return count +} + +// countGeneratedCombinationsFromParams returns the count of Combinations of Parameters generated from the Matrix +// Parameters +func (m *Matrix) countGeneratedCombinationsFromParams() int { + if !m.HasParams() { + return 0 + } + count := 1 + for _, param := range m.Params { + count *= len(param.Value.ArrayVal) + } + return count +} + +// countNewCombinationsFromInclude returns the count of Combinations of Parameters generated from the Matrix +// Include Parameters +func (m *Matrix) countNewCombinationsFromInclude() int { + if !m.HasInclude() { + return 0 + } + if !m.HasParams() { + return len(m.Include) + } + count := 0 + matrixParamMap := m.Params.extractParamMapArrVals() + for _, include := range m.Include { + for _, param := range include.Params { + if val, exist := matrixParamMap[param.Name]; exist { + // If the Matrix Include param values does not exist, a new Combination will be generated + if !slices.Contains(val, param.Value.StringVal) { + count++ + } else { + break + } + } + } + } + return count +} + +// HasInclude returns true if the Matrix has Include Parameters +func (m *Matrix) HasInclude() bool { + return m != nil && m.Include != nil && len(m.Include) > 0 +} + +// HasParams returns true if the Matrix has Parameters +func (m *Matrix) HasParams() bool { + return m != nil && m.Params != nil && len(m.Params) > 0 +} + +// GetAllParams returns a list of all Matrix Parameters +func (m *Matrix) GetAllParams() Params { + var params Params + if m.HasParams() { + params = append(params, m.Params...) + } + if m.HasInclude() { + for _, include := range m.Include { + params = append(params, include.Params...) + } + } + return params +} + +func (m *Matrix) validateCombinationsCount(ctx context.Context) (errs *apis.FieldError) { + matrixCombinationsCount := m.CountCombinations() + maxMatrixCombinationsCount := config.FromContextOrDefaults(ctx).Defaults.DefaultMaxMatrixCombinationsCount + if matrixCombinationsCount > maxMatrixCombinationsCount { + errs = errs.Also(apis.ErrOutOfBoundsValue(matrixCombinationsCount, 0, maxMatrixCombinationsCount, "matrix")) + } + return errs +} + +// validateParams validates the type of Parameter for Matrix.Params and Matrix.Include.Params +// Matrix.Params must be of type array. Matrix.Include.Params must be of type string. 
+// validateParams also validates Matrix.Params for a unique list of params +// and a unique list of params in each Matrix.Include.Params specification +func (m *Matrix) validateParams() (errs *apis.FieldError) { + if m != nil { + if m.HasInclude() { + for i, include := range m.Include { + errs = errs.Also(include.Params.validateDuplicateParameters().ViaField(fmt.Sprintf("matrix.include[%d].params", i))) + for _, param := range include.Params { + if param.Value.Type != ParamTypeString { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type string only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.include.params", param.Name)) + } + } + } + } + if m.HasParams() { + errs = errs.Also(m.Params.validateDuplicateParameters().ViaField("matrix.params")) + for _, param := range m.Params { + if param.Value.Type != ParamTypeArray { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type array only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.params", param.Name)) + } + } + } + } + return errs +} + +// validatePipelineParametersVariablesInMatrixParameters validates all pipeline parameter variables including Matrix.Params and Matrix.Include.Params +// that may contain the reference(s) to other params to make sure those references are used appropriately. +func (m *Matrix) validatePipelineParametersVariablesInMatrixParameters(prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { + if m.HasInclude() { + for _, include := range m.Include { + for idx, param := range include.Params { + stringElement := param.Value.StringVal + // Matrix Include Params must be of type string + errs = errs.Also(validateStringVariable(stringElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("", idx).ViaField("matrix.include.params", "")) + } + } + } + if m.HasParams() { + for _, param := range m.Params { + for idx, arrayElement := range param.Value.ArrayVal { + // Matrix Params must be of type array + errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", idx).ViaFieldKey("matrix.params", param.Name)) + } + } + } + return errs +} + +func (m *Matrix) validateParameterInOneOfMatrixOrParams(params []Param) (errs *apis.FieldError) { + matrixParamNames := m.GetAllParams().ExtractNames() + for _, param := range params { + if matrixParamNames.Has(param.Name) { + errs = errs.Also(apis.ErrMultipleOneOf("matrix["+param.Name+"]", "params["+param.Name+"]")) + } + } + return errs +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/merge.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/merge.go index 9585668c89..b500ef8758 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/merge.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/merge.go @@ -20,6 +20,7 @@ import ( "encoding/json" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/strategicpatch" ) @@ -67,6 +68,65 @@ func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, e return steps, nil } +// MergeStepsWithSpecs takes a possibly nil list of overrides and a +// list of steps, merging each of the steps with the overrides' resource requirements, if +// it's not nil, and returning the resulting list. 
+func MergeStepsWithSpecs(steps []Step, overrides []TaskRunStepSpec) ([]Step, error) { + stepNameToOverride := make(map[string]TaskRunStepSpec, len(overrides)) + for _, o := range overrides { + stepNameToOverride[o.Name] = o + } + for i, s := range steps { + o, found := stepNameToOverride[s.Name] + if !found { + continue + } + merged := v1.ResourceRequirements{} + err := mergeObjWithTemplate(&s.ComputeResources, &o.ComputeResources, &merged) + if err != nil { + return nil, err + } + steps[i].ComputeResources = merged + } + return steps, nil +} + +// MergeSidecarsWithSpecs takes a possibly nil list of overrides and a +// list of sidecars, merging each of the sidecars with the overrides' resource requirements, if +// it's not nil, and returning the resulting list. +func MergeSidecarsWithSpecs(sidecars []Sidecar, overrides []TaskRunSidecarSpec) ([]Sidecar, error) { + if len(overrides) == 0 { + return sidecars, nil + } + sidecarNameToOverride := make(map[string]TaskRunSidecarSpec, len(overrides)) + for _, o := range overrides { + sidecarNameToOverride[o.Name] = o + } + for i, s := range sidecars { + o, found := sidecarNameToOverride[s.Name] + if !found { + continue + } + merged := v1.ResourceRequirements{} + err := mergeObjWithTemplate(&s.ComputeResources, &o.ComputeResources, &merged) + if err != nil { + return nil, err + } + sidecars[i].ComputeResources = merged + } + return sidecars, nil +} + +// mergeObjWithTemplate merges obj with template and updates out to reflect the merged result. +// template, obj, and out should point to the same type. out points to the zero value of that type. +func mergeObjWithTemplate(template, obj, out interface{}) error { + md, err := getMergeData(template, out) + if err != nil { + return err + } + return mergeObjWithTemplateBytes(md, obj, out) +} + // getMergeData serializes the template and empty object to get the intermediate results necessary for // merging an object of the same type with this template. // This function is provided to avoid repeatedly serializing an identical template. 
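
The `MergeStepsWithSpecs`/`MergeSidecarsWithSpecs` additions above follow one pattern: index the overrides by name, then merge each matching override's `ComputeResources` into the corresponding step or sidecar (the vendored code does this via a strategic-merge patch). The sketch below is a minimal, self-contained illustration of that by-name overlay. It assumes simplified stand-in types (`step`, `stepOverride`, plain string maps for resources) rather than the real `Step`, `TaskRunStepSpec`, and `corev1.ResourceRequirements` types, so treat it as a sketch of the idea, not the vendored implementation.

```go
package main

import "fmt"

// Hypothetical, simplified stand-ins for the vendored Step and
// TaskRunStepSpec types; only the fields needed to show the merge-by-name idea.
type step struct {
	Name      string
	Resources map[string]string // e.g. "cpu" -> "500m"
}

type stepOverride struct {
	Name      string
	Resources map[string]string
}

// mergeStepsWithOverrides indexes the overrides by step name and, for every
// step that has a matching override, overlays the override's resource values
// on top of the step's own values (the override wins on conflicts). Steps
// without an override are left untouched, mirroring the early `continue`
// in MergeStepsWithSpecs.
func mergeStepsWithOverrides(steps []step, overrides []stepOverride) []step {
	byName := make(map[string]stepOverride, len(overrides))
	for _, o := range overrides {
		byName[o.Name] = o
	}
	for i, s := range steps {
		o, ok := byName[s.Name]
		if !ok {
			continue
		}
		merged := map[string]string{}
		for k, v := range s.Resources {
			merged[k] = v
		}
		for k, v := range o.Resources {
			merged[k] = v
		}
		steps[i].Resources = merged
	}
	return steps
}

func main() {
	steps := []step{{Name: "build", Resources: map[string]string{"cpu": "500m"}}}
	overrides := []stepOverride{{Name: "build", Resources: map[string]string{"memory": "1Gi"}}}
	fmt.Println(mergeStepsWithOverrides(steps, overrides))
}
```

Keeping the overrides in a map makes the merge roughly O(steps + overrides), which is the same design choice the vendored helpers make before delegating the field-level merge to the strategic-merge machinery.
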
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go index 24f3af5799..5915648f90 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go @@ -33,8 +33,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template": schema_pkg_apis_pipeline_pod_Template(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ChildStatusReference": schema_pkg_apis_pipeline_v1_ChildStatusReference(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ConfigSource": schema_pkg_apis_pipeline_v1_ConfigSource(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.EmbeddedTask": schema_pkg_apis_pipeline_v1_EmbeddedTask(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.IncludeParams": schema_pkg_apis_pipeline_v1_IncludeParams(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Matrix": schema_pkg_apis_pipeline_v1_Matrix(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param": schema_pkg_apis_pipeline_v1_Param(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec": schema_pkg_apis_pipeline_v1_ParamSpec(ref), @@ -61,6 +61,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration": schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec": schema_pkg_apis_pipeline_v1_PropertySpec(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance": schema_pkg_apis_pipeline_v1_Provenance(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource": schema_pkg_apis_pipeline_v1_RefSource(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverRef": schema_pkg_apis_pipeline_v1_ResolverRef(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResultRef": schema_pkg_apis_pipeline_v1_ResultRef(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar": schema_pkg_apis_pipeline_v1_Sidecar(ref), @@ -439,49 +440,6 @@ func schema_pkg_apis_pipeline_v1_ChildStatusReference(ref common.ReferenceCallba } } -func schema_pkg_apis_pipeline_v1_ConfigSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ConfigSource identifies the source where a resource came from. This can include Git repositories, Task Bundles, file checksums, or other information that allows users to identify where the resource came from and what version was used.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "uri": { - SchemaProps: spec.SchemaProps{ - Description: "URI indicates the identity of the source of the config. Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.uri Example: \"https://github.com/tektoncd/catalog\"", - Type: []string{"string"}, - Format: "", - }, - }, - "digest": { - SchemaProps: spec.SchemaProps{ - Description: "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. 
Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.digest Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "entryPoint": { - SchemaProps: spec.SchemaProps{ - Description: "EntryPoint identifies the entry point into the build. This is often a path to a configuration file and/or a target label within that file. Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.entryPoint Example: \"task/git-clone/0.8/git-clone.yaml\"", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - func schema_pkg_apis_pipeline_v1_EmbeddedTask(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -533,6 +491,13 @@ func schema_pkg_apis_pipeline_v1_EmbeddedTask(ref common.ReferenceCallback) comm }, }, }, + "displayName": { + SchemaProps: spec.SchemaProps{ + Description: "DisplayName is a user-facing name of the task that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, "description": { SchemaProps: spec.SchemaProps{ Description: "Description is a user-facing description of the task that may be used to populate a UI.", @@ -649,6 +614,47 @@ func schema_pkg_apis_pipeline_v1_EmbeddedTask(ref common.ReferenceCallback) comm } } +func schema_pkg_apis_pipeline_v1_IncludeParams(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "IncludeParams allows passing in a specific combinations of Parameters into the Matrix.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name the specified combination", + Type: []string{"string"}, + Format: "", + }, + }, + "params": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Params takes only `Parameters` of type `\"string\"` The names of the `params` must match the names of the `params` in the underlying `Task`", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"}, + } +} + func schema_pkg_apis_pipeline_v1_Matrix(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -675,11 +681,30 @@ func schema_pkg_apis_pipeline_v1_Matrix(ref common.ReferenceCallback) common.Ope }, }, }, + "include": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Include is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.IncludeParams"), + }, + }, + }, + }, + }, }, }, }, Dependencies: 
[]string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.IncludeParams", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"}, } } @@ -778,14 +803,14 @@ func schema_pkg_apis_pipeline_v1_ParamValue(ref common.ReferenceCallback) common Description: "ResultValue is a type alias of ParamValue", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "type": { + "Type": { SchemaProps: spec.SchemaProps{ Default: "", Type: []string{"string"}, Format: "", }, }, - "stringVal": { + "StringVal": { SchemaProps: spec.SchemaProps{ Description: "Represents the stored type of ParamValues.", Default: "", @@ -793,7 +818,7 @@ func schema_pkg_apis_pipeline_v1_ParamValue(ref common.ReferenceCallback) common Format: "", }, }, - "arrayVal": { + "ArrayVal": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", @@ -812,7 +837,7 @@ func schema_pkg_apis_pipeline_v1_ParamValue(ref common.ReferenceCallback) common }, }, }, - "objectVal": { + "ObjectVal": { SchemaProps: spec.SchemaProps{ Type: []string{"object"}, AdditionalProperties: &spec.SchemaOrBool{ @@ -828,7 +853,7 @@ func schema_pkg_apis_pipeline_v1_ParamValue(ref common.ReferenceCallback) common }, }, }, - Required: []string{"type", "stringVal", "arrayVal", "objectVal"}, + Required: []string{"Type", "StringVal", "ArrayVal", "ObjectVal"}, }, }, } @@ -1410,6 +1435,22 @@ func schema_pkg_apis_pipeline_v1_PipelineRunStatus(ref common.ReferenceCallback) Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance"), }, }, + "spanContext": { + SchemaProps: spec.SchemaProps{ + Description: "SpanContext contains tracing span context fields", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, }, }, @@ -1512,6 +1553,22 @@ func schema_pkg_apis_pipeline_v1_PipelineRunStatusFields(ref common.ReferenceCal Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance"), }, }, + "spanContext": { + SchemaProps: spec.SchemaProps{ + Description: "SpanContext contains tracing span context fields", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, }, }, @@ -1574,6 +1631,13 @@ func schema_pkg_apis_pipeline_v1_PipelineSpec(ref common.ReferenceCallback) comm Description: "PipelineSpec defines the desired state of Pipeline.", Type: []string{"object"}, Properties: map[string]spec.Schema{ + "displayName": { + SchemaProps: spec.SchemaProps{ + Description: "DisplayName is a user-facing name of the pipeline that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, "description": { SchemaProps: spec.SchemaProps{ Description: "Description is a user-facing description of the pipeline that may be used to populate a UI.", @@ -1698,6 +1762,20 @@ func schema_pkg_apis_pipeline_v1_PipelineTask(ref common.ReferenceCallback) comm Format: "", }, }, + "displayName": { + SchemaProps: spec.SchemaProps{ + Description: "DisplayName is the display name of this task within the context of a Pipeline. 
This display name may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is the description of this task within the context of a Pipeline. This description may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, "taskRef": { SchemaProps: spec.SchemaProps{ Description: "TaskRef is a reference to a task definition.", @@ -2008,7 +2086,7 @@ func schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref common.Referen return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", + Description: "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding.\n\nDeprecated: use PipelineWorkspaceDeclaration type instead", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -2063,13 +2141,13 @@ func schema_pkg_apis_pipeline_v1_Provenance(ref common.ReferenceCallback) common return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.). For now, it only contains the subfield `ConfigSource` that identifies the source where a build config file came from. In future, it can be expanded as needed to include more metadata about the build. This field aims to be used to carry minimum amount of the authenticated metadata in *Run status so that Tekton Chains can pick it up and record in the provenance it generates.", + Description: "Provenance contains metadata about resources used in the TaskRun/PipelineRun such as the source from where a remote build definition was fetched. This field aims to carry minimum amoumt of metadata in *Run status so that Tekton Chains can capture them in the provenance.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "configSource": { + "refSource": { SchemaProps: spec.SchemaProps{ - Description: "ConfigSource identifies the source where a resource came from.", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ConfigSource"), + Description: "RefSource identifies the source where a remote task/pipeline came from.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"), }, }, "featureFlags": { @@ -2082,7 +2160,50 @@ func schema_pkg_apis_pipeline_v1_Provenance(ref common.ReferenceCallback) common }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/config.FeatureFlags", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ConfigSource"}, + "github.com/tektoncd/pipeline/pkg/apis/config.FeatureFlags", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"}, + } +} + +func schema_pkg_apis_pipeline_v1_RefSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RefSource contains the information that can uniquely identify where a remote built definition came from i.e. 
Git repositories, Tekton Bundles in OCI registry and hub.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "uri": { + SchemaProps: spec.SchemaProps{ + Description: "URI indicates the identity of the source of the build definition. Example: \"https://github.com/tektoncd/catalog\"", + Type: []string{"string"}, + Format: "", + }, + }, + "digest": { + SchemaProps: spec.SchemaProps{ + Description: "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "entryPoint": { + SchemaProps: spec.SchemaProps{ + Description: "EntryPoint identifies the entry point into the build. This is often a path to a build definition file and/or a target label within that file. Example: \"task/git-clone/0.8/git-clone.yaml\"", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, } } @@ -3141,14 +3262,14 @@ func schema_pkg_apis_pipeline_v1_TaskRef(ref common.ReferenceCallback) common.Op }, "kind": { SchemaProps: spec.SchemaProps{ - Description: "TaskKind indicates the kind of the task, namespaced or cluster scoped.", + Description: "TaskKind indicates the Kind of the Task: 1. Namespaced Task when Kind is set to \"Task\". If Kind is \"\", it defaults to \"Task\". 2. Custom Task when Kind is non-empty and APIVersion is non-empty", Type: []string{"string"}, Format: "", }, }, "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "API version of the referent", + Description: "API version of the referent Note: A Task with non-empty APIVersion and Kind is considered a Custom Task", Type: []string{"string"}, Format: "", }, @@ -3752,6 +3873,22 @@ func schema_pkg_apis_pipeline_v1_TaskRunStatus(ref common.ReferenceCallback) com Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance"), }, }, + "spanContext": { + SchemaProps: spec.SchemaProps{ + Description: "SpanContext contains tracing span context fields", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, Required: []string{"podName"}, }, @@ -3876,6 +4013,22 @@ func schema_pkg_apis_pipeline_v1_TaskRunStatusFields(ref common.ReferenceCallbac Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance"), }, }, + "spanContext": { + SchemaProps: spec.SchemaProps{ + Description: "SpanContext contains tracing span context fields", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, Required: []string{"podName"}, }, @@ -3942,6 +4095,13 @@ func schema_pkg_apis_pipeline_v1_TaskSpec(ref common.ReferenceCallback) common.O }, }, }, + "displayName": { + SchemaProps: spec.SchemaProps{ + Description: "DisplayName is a user-facing name of the task that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, "description": { SchemaProps: spec.SchemaProps{ Description: "Description is a user-facing description of the task that may be used to populate a UI.", diff --git 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go index 2269c0896c..746cd3d4cb 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go @@ -23,8 +23,8 @@ import ( "regexp" "strings" - resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" "github.com/tektoncd/pipeline/pkg/substitution" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" ) @@ -62,6 +62,9 @@ type ParamSpec struct { Default *ParamValue `json:"default,omitempty"` } +// ParamSpecs is a list of ParamSpec +type ParamSpecs []ParamSpec + // PropertySpec defines the struct for object keys type PropertySpec struct { Type ParamType `json:"type,omitempty"` @@ -107,16 +110,253 @@ func (pp *ParamSpec) setDefaultsForProperties() { } } -// ResourceParam declares a string value to use for the parameter called Name, and is used in -// the specific context of PipelineResources. -type ResourceParam = resource.ResourceParam - // Param declares an ParamValues to use for the parameter called name. type Param struct { Name string `json:"name"` Value ParamValue `json:"value"` } +// ExtractNames returns a set of unique names +func (ps Params) ExtractNames() sets.String { + names := sets.String{} + for _, p := range ps { + names.Insert(p.Name) + } + return names +} + +func (ps Params) extractValues() []string { + pvs := []string{} + for i := range ps { + pvs = append(pvs, ps[i].Value.StringVal) + pvs = append(pvs, ps[i].Value.ArrayVal...) + for _, v := range ps[i].Value.ObjectVal { + pvs = append(pvs, v) + } + } + return pvs +} + +// extractParamMapArrVals creates a param map with the key: param.Name and +// val: param.Value.ArrayVal +func (ps Params) extractParamMapArrVals() map[string][]string { + paramsMap := make(map[string][]string) + for _, p := range ps { + paramsMap[p.Name] = p.Value.ArrayVal + } + return paramsMap +} + +// Params is a list of Param +type Params []Param + +// extractParamArrayLengths extract and return the lengths of all array params +// Example of returned value: {"a-array-params": 2,"b-array-params": 2 } +func (ps Params) extractParamArrayLengths() map[string]int { + // Collect all array params + arrayParamsLengths := make(map[string]int) + + // Collect array params lengths from params + for _, p := range ps { + if p.Value.Type == ParamTypeArray { + arrayParamsLengths[p.Name] = len(p.Value.ArrayVal) + } + } + return arrayParamsLengths +} + +// validateDuplicateParameters checks if a parameter with the same name is defined more than once +func (ps Params) validateDuplicateParameters() (errs *apis.FieldError) { + taskParamNames := sets.NewString() + for i, param := range ps { + if taskParamNames.Has(param.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("parameter names must be unique,"+ + " the parameter \"%s\" is also defined at", param.Name), fmt.Sprintf("[%d].name", i))) + } + taskParamNames.Insert(param.Name) + } + return errs +} + +// extractParamArrayLengths extract and return the lengths of all array params +// Example of returned value: {"a-array-params": 2,"b-array-params": 2 } +func (ps ParamSpecs) extractParamArrayLengths() map[string]int { + // Collect all array params + arrayParamsLengths := make(map[string]int) + + // Collect array params lengths from defaults + for _, p := range ps { + if p.Default != nil { + if p.Default.Type == ParamTypeArray { + 
arrayParamsLengths[p.Name] = len(p.Default.ArrayVal) + } + } + } + return arrayParamsLengths +} + +// validateOutofBoundArrayParams validates if the array indexing params are out of bound +// example of arrayIndexingParams: ["$(params.a-array-param[1])", "$(params.b-array-param[2])"] +// example of arrayParamsLengths: {"a-array-params": 2,"b-array-params": 2 } +func validateOutofBoundArrayParams(arrayIndexingParams []string, arrayParamsLengths map[string]int) error { + outofBoundParams := sets.String{} + for _, val := range arrayIndexingParams { + indexString := substitution.ExtractIndexString(val) + idx, _ := substitution.ExtractIndex(indexString) + // this will extract the param name from reference + // e.g. $(params.a-array-param[1]) -> a-array-param + paramName, _, _ := substitution.ExtractVariablesFromString(substitution.TrimArrayIndex(val), "params") + + if paramLength, ok := arrayParamsLengths[paramName[0]]; ok { + if idx >= paramLength { + outofBoundParams.Insert(val) + } + } + } + if outofBoundParams.Len() > 0 { + return fmt.Errorf("non-existent param references:%v", outofBoundParams.List()) + } + return nil +} + +// extractArrayIndexingParamRefs takes a string of the form `foo-$(params.array-param[1])-bar` and extracts the portions of the string that reference an element in an array param. +// For example, for the string “foo-$(params.array-param[1])-bar-$(params.other-array-param[2])-$(params.string-param)`, +// it would return ["$(params.array-param[1])", "$(params.other-array-param[2])"]. +func extractArrayIndexingParamRefs(paramReference string) []string { + l := []string{} + list := substitution.ExtractParamsExpressions(paramReference) + for _, val := range list { + indexString := substitution.ExtractIndexString(val) + if indexString != "" { + l = append(l, val) + } + } + return l +} + +// extractParamRefsFromSteps get all array indexing references from steps +func extractParamRefsFromSteps(steps []Step) []string { + paramsRefs := []string{} + for _, step := range steps { + paramsRefs = append(paramsRefs, step.Script) + container := step.ToK8sContainer() + paramsRefs = append(paramsRefs, extractParamRefsFromContainer(container)...) + } + return paramsRefs +} + +// extractParamRefsFromStepTemplate get all array indexing references from StepsTemplate +func extractParamRefsFromStepTemplate(stepTemplate *StepTemplate) []string { + if stepTemplate == nil { + return nil + } + container := stepTemplate.ToK8sContainer() + return extractParamRefsFromContainer(container) +} + +// extractParamRefsFromSidecars get all array indexing references from sidecars +func extractParamRefsFromSidecars(sidecars []Sidecar) []string { + paramsRefs := []string{} + for _, s := range sidecars { + paramsRefs = append(paramsRefs, s.Script) + container := s.ToK8sContainer() + paramsRefs = append(paramsRefs, extractParamRefsFromContainer(container)...) 
+ } + return paramsRefs +} + +// extractParamRefsFromVolumes get all array indexing references from volumes +func extractParamRefsFromVolumes(volumes []corev1.Volume) []string { + paramsRefs := []string{} + for i, v := range volumes { + paramsRefs = append(paramsRefs, v.Name) + if v.VolumeSource.ConfigMap != nil { + paramsRefs = append(paramsRefs, v.ConfigMap.Name) + for _, item := range v.ConfigMap.Items { + paramsRefs = append(paramsRefs, item.Key) + paramsRefs = append(paramsRefs, item.Path) + } + } + if v.VolumeSource.Secret != nil { + paramsRefs = append(paramsRefs, v.Secret.SecretName) + for _, item := range v.Secret.Items { + paramsRefs = append(paramsRefs, item.Key) + paramsRefs = append(paramsRefs, item.Path) + } + } + if v.PersistentVolumeClaim != nil { + paramsRefs = append(paramsRefs, v.PersistentVolumeClaim.ClaimName) + } + if v.Projected != nil { + for _, s := range volumes[i].Projected.Sources { + if s.ConfigMap != nil { + paramsRefs = append(paramsRefs, s.ConfigMap.Name) + } + if s.Secret != nil { + paramsRefs = append(paramsRefs, s.Secret.Name) + } + if s.ServiceAccountToken != nil { + paramsRefs = append(paramsRefs, s.ServiceAccountToken.Audience) + } + } + } + if v.CSI != nil { + if v.CSI.NodePublishSecretRef != nil { + paramsRefs = append(paramsRefs, v.CSI.NodePublishSecretRef.Name) + } + if v.CSI.VolumeAttributes != nil { + for _, value := range v.CSI.VolumeAttributes { + paramsRefs = append(paramsRefs, value) + } + } + } + } + return paramsRefs +} + +// extractParamRefsFromContainer get all array indexing references from container +func extractParamRefsFromContainer(c *corev1.Container) []string { + paramsRefs := []string{} + paramsRefs = append(paramsRefs, c.Name) + paramsRefs = append(paramsRefs, c.Image) + paramsRefs = append(paramsRefs, string(c.ImagePullPolicy)) + paramsRefs = append(paramsRefs, c.Args...) + + for ie, e := range c.Env { + paramsRefs = append(paramsRefs, e.Value) + if c.Env[ie].ValueFrom != nil { + if e.ValueFrom.SecretKeyRef != nil { + paramsRefs = append(paramsRefs, e.ValueFrom.SecretKeyRef.LocalObjectReference.Name) + paramsRefs = append(paramsRefs, e.ValueFrom.SecretKeyRef.Key) + } + if e.ValueFrom.ConfigMapKeyRef != nil { + paramsRefs = append(paramsRefs, e.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name) + paramsRefs = append(paramsRefs, e.ValueFrom.ConfigMapKeyRef.Key) + } + } + } + + for _, e := range c.EnvFrom { + paramsRefs = append(paramsRefs, e.Prefix) + if e.ConfigMapRef != nil { + paramsRefs = append(paramsRefs, e.ConfigMapRef.LocalObjectReference.Name) + } + if e.SecretRef != nil { + paramsRefs = append(paramsRefs, e.SecretRef.LocalObjectReference.Name) + } + } + + paramsRefs = append(paramsRefs, c.WorkingDir) + paramsRefs = append(paramsRefs, c.Command...) + + for _, v := range c.VolumeMounts { + paramsRefs = append(paramsRefs, v.Name) + paramsRefs = append(paramsRefs, v.MountPath) + paramsRefs = append(paramsRefs, v.SubPath) + } + return paramsRefs +} + // ParamType indicates the type of an input parameter; // Used to distinguish between a single string and an array of strings. type ParamType string @@ -137,11 +377,11 @@ var AllParamTypes = []ParamType{ParamTypeString, ParamTypeArray, ParamTypeObject // Used in JSON unmarshalling so that a single JSON field can accept // either an individual string or an array of strings. type ParamValue struct { - Type ParamType `json:"type"` // Represents the stored type of ParamValues. - StringVal string `json:"stringVal"` + Type ParamType // Represents the stored type of ParamValues. 
+ StringVal string // +listType=atomic - ArrayVal []string `json:"arrayVal"` - ObjectVal map[string]string `json:"objectVal"` + ArrayVal []string + ObjectVal map[string]string } // UnmarshalJSON implements the json.Unmarshaller interface. @@ -214,6 +454,8 @@ func (paramValues *ParamValue) ApplyReplacements(stringReplacements map[string]s newObjectVal[k] = substitution.ApplyReplacements(v, stringReplacements) } paramValues.ObjectVal = newObjectVal + case ParamTypeString: + fallthrough default: paramValues.applyOrCorrect(stringReplacements, arrayReplacements, objectReplacements) } @@ -291,12 +533,9 @@ func ArrayReference(a string) string { // validatePipelineParametersVariablesInTaskParameters validates param value that // may contain the reference(s) to other params to make sure those references are used appropriately. -func validatePipelineParametersVariablesInTaskParameters(params []Param, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { - taskParamNames := sets.NewString() - for i, param := range params { - if taskParamNames.Has(param.Name) { - errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("params names must be unique, the same param: %s is defined multiple times at", param.Name), fmt.Sprintf("params[%d].name", i))) - } +func validatePipelineParametersVariablesInTaskParameters(params Params, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { + errs = errs.Also(params.validateDuplicateParameters()).ViaField("params") + for _, param := range params { switch param.Value.Type { case ParamTypeArray: for idx, arrayElement := range param.Value.ArrayVal { @@ -306,47 +545,11 @@ func validatePipelineParametersVariablesInTaskParameters(params []Param, prefix for key, val := range param.Value.ObjectVal { errs = errs.Also(validateStringVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldKey("properties", key).ViaFieldKey("params", param.Name)) } + case ParamTypeString: + fallthrough default: errs = errs.Also(validateParamStringValue(param, prefix, paramNames, arrayParamNames, objectParamNameKeys)) } - taskParamNames.Insert(param.Name) - } - return errs -} - -// validatePipelineParametersVariablesInMatrixParameters validates matrix param value -// that may contain the reference(s) to other params to make sure those references are used appropriately. 
-func validatePipelineParametersVariablesInMatrixParameters(matrix []Param, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { - for _, param := range matrix { - for idx, arrayElement := range param.Value.ArrayVal { - errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", idx).ViaFieldKey("matrix", param.Name)) - } - } - return errs -} - -func validateParametersInTaskMatrix(matrix *Matrix) (errs *apis.FieldError) { - if matrix != nil { - for _, param := range matrix.Params { - if param.Value.Type != ParamTypeArray { - errs = errs.Also(apis.ErrInvalidValue("parameters of type array only are allowed in matrix", "").ViaFieldKey("matrix", param.Name)) - } - } - } - return errs -} - -func validateParameterInOneOfMatrixOrParams(matrix *Matrix, params []Param) (errs *apis.FieldError) { - matrixParameterNames := sets.NewString() - if matrix != nil { - for _, param := range matrix.Params { - matrixParameterNames.Insert(param.Name) - } - } - for _, param := range params { - if matrixParameterNames.Has(param.Name) { - errs = errs.Also(apis.ErrMultipleOneOf("matrix["+param.Name+"]", "params["+param.Name+"]")) - } } return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_defaults.go index 594e1761e5..a6c7190e8e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_defaults.go @@ -19,6 +19,7 @@ package v1 import ( "context" + "github.com/tektoncd/pipeline/pkg/apis/config" "knative.dev/pkg/apis" ) @@ -36,25 +37,27 @@ func (ps *PipelineSpec) SetDefaults(ctx context.Context) { } for _, pt := range ps.Tasks { - if pt.TaskRef != nil { - if pt.TaskRef.Kind == "" { - pt.TaskRef.Kind = NamespacedTaskKind - } - } - if pt.TaskSpec != nil { - pt.TaskSpec.SetDefaults(ctx) - } + pt.SetDefaults(ctx) } for _, ft := range ps.Finally { ctx := ctx // Ensure local scoping per Task - if ft.TaskRef != nil { - if ft.TaskRef.Kind == "" { - ft.TaskRef.Kind = NamespacedTaskKind - } + ft.SetDefaults(ctx) + } +} + +// SetDefaults sets default values for a PipelineTask +func (pt *PipelineTask) SetDefaults(ctx context.Context) { + cfg := config.FromContextOrDefaults(ctx) + if pt.TaskRef != nil { + if pt.TaskRef.Kind == "" { + pt.TaskRef.Kind = NamespacedTaskKind } - if ft.TaskSpec != nil { - ft.TaskSpec.SetDefaults(ctx) + if pt.TaskRef.Name == "" && pt.TaskRef.Resolver == "" { + pt.TaskRef.Resolver = ResolverName(cfg.Defaults.DefaultResolverType) } } + if pt.TaskSpec != nil { + pt.TaskSpec.SetDefaults(ctx) + } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go index fec422ae97..d338944881 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go @@ -17,21 +17,12 @@ limitations under the License. 
package v1 import ( - "context" - "fmt" - "strings" - - "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" - "github.com/tektoncd/pipeline/pkg/apis/version" - "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "knative.dev/pkg/apis" "knative.dev/pkg/kmeta" ) @@ -81,6 +72,10 @@ func (*Pipeline) GetGroupVersionKind() schema.GroupVersionKind { // PipelineSpec defines the desired state of Pipeline. type PipelineSpec struct { + // DisplayName is a user-facing name of the pipeline that may be + // used to populate a UI. + // +optional + DisplayName string `json:"displayName,omitempty"` // Description is a user-facing description of the pipeline that may be // used to populate a UI. // +optional @@ -91,7 +86,7 @@ type PipelineSpec struct { // Params declares a list of input parameters that must be supplied when // this Pipeline is run. // +listType=atomic - Params []ParamSpec `json:"params,omitempty"` + Params ParamSpecs `json:"params,omitempty"` // Workspaces declares a set of named workspaces that are expected to be // provided by a PipelineRun. // +optional @@ -160,6 +155,16 @@ type PipelineTask struct { // the execution order of tasks relative to one another. Name string `json:"name,omitempty"` + // DisplayName is the display name of this task within the context of a Pipeline. + // This display name may be used to populate a UI. + // +optional + DisplayName string `json:"displayName,omitempty"` + + // Description is the description of this task within the context of a Pipeline. + // This description may be used to populate a UI. + // +optional + Description string `json:"description,omitempty"` + // TaskRef is a reference to a task definition. // +optional TaskRef *TaskRef `json:"taskRef,omitempty"` @@ -185,7 +190,7 @@ type PipelineTask struct { // Parameters declares parameters passed to this task. // +optional // +listType=atomic - Params []Param `json:"params,omitempty"` + Params Params `json:"params,omitempty"` // Matrix declares parameters used to fan out this task. // +optional @@ -204,243 +209,16 @@ type PipelineTask struct { Timeout *metav1.Duration `json:"timeout,omitempty"` } -// Matrix is used to fan out Tasks in a Pipeline -type Matrix struct { - // Params is a list of parameters used to fan out the pipelineTask - // Params takes only `Parameters` of type `"array"` - // Each array element is supplied to the `PipelineTask` by substituting `params` of type `"string"` in the underlying `Task`. - // The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting. 
- // +listType=atomic - Params []Param `json:"params,omitempty"` -} - -// validateRefOrSpec validates at least one of taskRef or taskSpec is specified -func (pt PipelineTask) validateRefOrSpec() (errs *apis.FieldError) { - // can't have both taskRef and taskSpec at the same time - if pt.TaskRef != nil && pt.TaskSpec != nil { - errs = errs.Also(apis.ErrMultipleOneOf("taskRef", "taskSpec")) - } - // Check that one of TaskRef and TaskSpec is present - if pt.TaskRef == nil && pt.TaskSpec == nil { - errs = errs.Also(apis.ErrMissingOneOf("taskRef", "taskSpec")) - } - return errs -} - -// validateCustomTask validates custom task specifications - checking kind and fail if not yet supported features specified -func (pt PipelineTask) validateCustomTask() (errs *apis.FieldError) { - if pt.TaskRef != nil && pt.TaskRef.Kind == "" { - errs = errs.Also(apis.ErrInvalidValue("custom task ref must specify kind", "taskRef.kind")) - } - if pt.TaskSpec != nil && pt.TaskSpec.Kind == "" { - errs = errs.Also(apis.ErrInvalidValue("custom task spec must specify kind", "taskSpec.kind")) - } - if pt.TaskRef != nil && pt.TaskRef.APIVersion == "" { - errs = errs.Also(apis.ErrInvalidValue("custom task ref must specify apiVersion", "taskRef.apiVersion")) - } - if pt.TaskSpec != nil && pt.TaskSpec.APIVersion == "" { - errs = errs.Also(apis.ErrInvalidValue("custom task spec must specify apiVersion", "taskSpec.apiVersion")) - } - return errs -} - -// validateTask validates a pipeline task or a final task for taskRef and taskSpec -func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) - // Validate TaskSpec if it's present - if pt.TaskSpec != nil { - errs = errs.Also(pt.TaskSpec.Validate(ctx).ViaField("taskSpec")) - } - if pt.TaskRef != nil { - if pt.TaskRef.Name != "" { - // TaskRef name must be a valid k8s name - if errSlice := validation.IsQualifiedName(pt.TaskRef.Name); len(errSlice) != 0 { - errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name")) - } - } else if pt.TaskRef.Resolver == "" { - errs = errs.Also(apis.ErrInvalidValue("taskRef must specify name", "taskRef.name")) - } - if cfg.FeatureFlags.EnableAPIFields != config.BetaAPIFields && cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { - // fail if resolver or resource are present when enable-api-fields is false. - if pt.TaskRef.Resolver != "" { - errs = errs.Also(apis.ErrDisallowedFields("taskref.resolver")) - } - if len(pt.TaskRef.Params) > 0 { - errs = errs.Also(apis.ErrDisallowedFields("taskref.params")) - } - } - } - return errs +// IsCustomTask checks whether an embedded TaskSpec is a Custom Task +func (et *EmbeddedTask) IsCustomTask() bool { + // Note that if `apiVersion` is set to `"tekton.dev/v1beta1"` and `kind` is set to `"Task"`, + // the reference will be considered a Custom Task - https://github.com/tektoncd/pipeline/issues/6457 + return et != nil && et.APIVersion != "" && et.Kind != "" } // IsMatrixed return whether pipeline task is matrixed func (pt *PipelineTask) IsMatrixed() bool { - return pt.Matrix != nil && len(pt.Matrix.Params) > 0 -} - -func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldError) { - if pt.IsMatrixed() { - // This is an alpha feature and will fail validation if it's used in a pipeline spec - // when the enable-api-fields feature gate is anything but "alpha". 
- errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) - errs = errs.Also(pt.validateMatrixCombinationsCount(ctx)) - } - errs = errs.Also(validateParameterInOneOfMatrixOrParams(pt.Matrix, pt.Params)) - errs = errs.Also(validateParametersInTaskMatrix(pt.Matrix)) - return errs -} - -func (pt *PipelineTask) validateMatrixCombinationsCount(ctx context.Context) (errs *apis.FieldError) { - matrixCombinationsCount := pt.GetMatrixCombinationsCount() - maxMatrixCombinationsCount := config.FromContextOrDefaults(ctx).Defaults.DefaultMaxMatrixCombinationsCount - if matrixCombinationsCount > maxMatrixCombinationsCount { - errs = errs.Also(apis.ErrOutOfBoundsValue(matrixCombinationsCount, 0, maxMatrixCombinationsCount, "matrix")) - } - return errs -} - -func (pt PipelineTask) validateEmbeddedOrType() (errs *apis.FieldError) { - // Reject cases where APIVersion and/or Kind are specified alongside an embedded Task. - // We determine if this is an embedded Task by checking of TaskSpec.TaskSpec.Steps has items. - if pt.TaskSpec != nil && len(pt.TaskSpec.TaskSpec.Steps) > 0 { - if pt.TaskSpec.APIVersion != "" { - errs = errs.Also(&apis.FieldError{ - Message: "taskSpec.apiVersion cannot be specified when using taskSpec.steps", - Paths: []string{"taskSpec.apiVersion"}, - }) - } - if pt.TaskSpec.Kind != "" { - errs = errs.Also(&apis.FieldError{ - Message: "taskSpec.kind cannot be specified when using taskSpec.steps", - Paths: []string{"taskSpec.kind"}, - }) - } - } - return -} - -// GetMatrixCombinationsCount returns the count of combinations of Parameters generated from the Matrix in PipelineTask. -func (pt *PipelineTask) GetMatrixCombinationsCount() int { - if !pt.IsMatrixed() { - return 0 - } - count := 1 - for _, param := range pt.Matrix.Params { - count *= len(param.Value.ArrayVal) - } - return count -} - -func (pt *PipelineTask) validateResultsFromMatrixedPipelineTasksNotConsumed(matrixedPipelineTasks sets.String) (errs *apis.FieldError) { - for _, ref := range PipelineTaskResultRefs(pt) { - if matrixedPipelineTasks.Has(ref.PipelineTask) { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("consuming results from matrixed task %s is not allowed", ref.PipelineTask), "")) - } - } - return errs -} - -func (pt *PipelineTask) validateExecutionStatusVariablesDisallowed() (errs *apis.FieldError) { - for _, param := range pt.Params { - if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok { - errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, "value"). - ViaFieldKey("params", param.Name)) - } - } - for i, we := range pt.When { - if expressions, ok := we.GetVarSubstitutionExpressions(); ok { - errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, ""). - ViaFieldIndex("when", i)) - } - } - return errs -} - -func (pt *PipelineTask) validateExecutionStatusVariablesAllowed(ptNames sets.String) (errs *apis.FieldError) { - for _, param := range pt.Params { - if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok { - errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, "value"). - ViaFieldKey("params", param.Name)) - } - } - for i, we := range pt.When { - if expressions, ok := we.GetVarSubstitutionExpressions(); ok { - errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, ""). 
- ViaFieldIndex("when", i)) - } - } - return errs -} - -func validateContainsExecutionStatusVariablesDisallowed(expressions []string, path string) (errs *apis.FieldError) { - if containsExecutionStatusReferences(expressions) { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline tasks can not refer to execution status"+ - " of any other pipeline task or aggregate status of tasks"), path)) - } - return errs -} - -func containsExecutionStatusReferences(expressions []string) bool { - // validate tasks.pipelineTask.status/tasks.status if this expression is not a result reference - if !LooksLikeContainsResultRefs(expressions) { - for _, e := range expressions { - // check if it contains context variable accessing execution status - $(tasks.taskname.status) - // or an aggregate status - $(tasks.status) - if containsExecutionStatusRef(e) { - return true - } - } - } - return false -} - -func validateExecutionStatusVariablesExpressions(expressions []string, ptNames sets.String, fieldPath string) (errs *apis.FieldError) { - // validate tasks.pipelineTask.status if this expression is not a result reference - if !LooksLikeContainsResultRefs(expressions) { - for _, expression := range expressions { - // its a reference to aggregate status of dag tasks - $(tasks.status) - if expression == PipelineTasksAggregateStatus { - continue - } - // check if it contains context variable accessing execution status - $(tasks.taskname.status) - if containsExecutionStatusRef(expression) { - // strip tasks. and .status from tasks.taskname.status to further verify task name - pt := strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".status") - // report an error if the task name does not exist in the list of dag tasks - if !ptNames.Has(pt) { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline task %s is not defined in the pipeline", pt), fieldPath)) - } - } - } - } - return errs -} - -func (pt *PipelineTask) validateWorkspaces(workspaceNames sets.String) (errs *apis.FieldError) { - workspaceBindingNames := sets.NewString() - for i, ws := range pt.Workspaces { - if workspaceBindingNames.Has(ws.Name) { - errs = errs.Also(apis.ErrGeneric( - fmt.Sprintf("workspace name %q must be unique", ws.Name), "").ViaFieldIndex("workspaces", i)) - } - - if ws.Workspace == "" { - if !workspaceNames.Has(ws.Name) { - errs = errs.Also(apis.ErrInvalidValue( - fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Name), - "", - ).ViaFieldIndex("workspaces", i)) - } - } else if !workspaceNames.Has(ws.Workspace) { - errs = errs.Also(apis.ErrInvalidValue( - fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Workspace), - "", - ).ViaFieldIndex("workspaces", i)) - } - - workspaceBindingNames.Insert(ws.Name) - } - return errs + return pt.Matrix.HasParams() || pt.Matrix.HasInclude() } // TaskSpecMetadata returns the metadata of the PipelineTask's EmbeddedTask spec. @@ -453,38 +231,6 @@ func (pt PipelineTask) HashKey() string { return pt.Name } -// ValidateName checks whether the PipelineTask's name is a valid DNS label -func (pt PipelineTask) ValidateName() *apis.FieldError { - if err := validation.IsDNS1123Label(pt.Name); len(err) > 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("invalid value %q", pt.Name), - Paths: []string{"name"}, - Details: "Pipeline Task name must be a valid DNS Label." 
+ - "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - } - } - return nil -} - -// Validate classifies whether a task is a custom task or a regular task(dag/final) -// calls the validation routine based on the type of the task -func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { - errs = errs.Also(pt.validateRefOrSpec()) - - errs = errs.Also(pt.validateEmbeddedOrType()) - - // Pipeline task having taskRef/taskSpec with APIVersion is classified as custom task - switch { - case pt.TaskRef != nil && pt.TaskRef.APIVersion != "": - errs = errs.Also(pt.validateCustomTask()) - case pt.TaskSpec != nil && pt.TaskSpec.APIVersion != "": - errs = errs.Also(pt.validateCustomTask()) - default: - errs = errs.Also(pt.validateTask(ctx)) - } - return -} - // Deps returns all other PipelineTask dependencies of this PipelineTask, based on resource usage or ordering func (pt PipelineTask) Deps() []string { // hold the list of dependencies in a set to avoid duplicates @@ -538,22 +284,6 @@ func (l PipelineTaskList) Names() sets.String { return names } -// Validate a list of pipeline tasks including custom task -func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, path string) (errs *apis.FieldError) { - for i, t := range l { - // validate pipeline task name - errs = errs.Also(t.ValidateName().ViaFieldIndex(path, i)) - // names cannot be duplicated - checking that pipelineTask names are unique - if _, ok := taskNames[t.Name]; ok { - errs = errs.Also(apis.ErrMultipleOneOf("name").ViaFieldIndex(path, i)) - } - taskNames.Insert(t.Name) - // validate custom task, dag, or final task - errs = errs.Also(t.Validate(ctx).ViaFieldIndex(path, i)) - } - return errs -} - // PipelineTaskParam is used to provide arbitrary string parameters to a Task. 
type PipelineTaskParam struct { Name string `json:"name"` diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go index 1ab92eb890..0c60fae33b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go @@ -23,11 +23,13 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" + "github.com/tektoncd/pipeline/pkg/apis/version" "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" "github.com/tektoncd/pipeline/pkg/substitution" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" "knative.dev/pkg/apis" "knative.dev/pkg/webhook/resourcesemantics" ) @@ -89,6 +91,181 @@ func ValidatePipelineTasks(ctx context.Context, tasks []PipelineTask, finalTasks return errs } +// Validate a list of pipeline tasks including custom task +func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, path string) (errs *apis.FieldError) { + for i, t := range l { + // validate pipeline task name + errs = errs.Also(t.ValidateName().ViaFieldIndex(path, i)) + // names cannot be duplicated - checking that pipelineTask names are unique + if _, ok := taskNames[t.Name]; ok { + errs = errs.Also(apis.ErrMultipleOneOf("name").ViaFieldIndex(path, i)) + } + taskNames.Insert(t.Name) + // validate custom task, dag, or final task + errs = errs.Also(t.Validate(ctx).ViaFieldIndex(path, i)) + } + return errs +} + +// ValidateName checks whether the PipelineTask's name is a valid DNS label +func (pt PipelineTask) ValidateName() *apis.FieldError { + if err := validation.IsDNS1123Label(pt.Name); len(err) > 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("invalid value %q", pt.Name), + Paths: []string{"name"}, + Details: "Pipeline Task name must be a valid DNS Label." + + "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + } + } + return nil +} + +// Validate classifies whether a task is a custom task or a regular task(dag/final) +// calls the validation routine based on the type of the task +func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { + errs = errs.Also(pt.validateRefOrSpec()) + + errs = errs.Also(pt.validateEmbeddedOrType()) + + // Pipeline task having taskRef/taskSpec with APIVersion is classified as custom task + switch { + case pt.TaskRef != nil && pt.TaskRef.APIVersion != "": + errs = errs.Also(pt.validateCustomTask()) + case pt.TaskSpec != nil && pt.TaskSpec.APIVersion != "": + errs = errs.Also(pt.validateCustomTask()) + default: + errs = errs.Also(pt.validateTask(ctx)) + } + return +} + +func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldError) { + if pt.IsMatrixed() { + // This is an alpha feature and will fail validation if it's used in a pipeline spec + // when the enable-api-fields feature gate is anything but "alpha". 
+ errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) + errs = errs.Also(pt.Matrix.validateCombinationsCount(ctx)) + } + errs = errs.Also(pt.Matrix.validateParameterInOneOfMatrixOrParams(pt.Params)) + errs = errs.Also(pt.Matrix.validateParams()) + return errs +} + +func (pt PipelineTask) validateEmbeddedOrType() (errs *apis.FieldError) { + // Reject cases where APIVersion and/or Kind are specified alongside an embedded Task. + // We determine if this is an embedded Task by checking of TaskSpec.TaskSpec.Steps has items. + if pt.TaskSpec != nil && len(pt.TaskSpec.TaskSpec.Steps) > 0 { + if pt.TaskSpec.APIVersion != "" { + errs = errs.Also(&apis.FieldError{ + Message: "taskSpec.apiVersion cannot be specified when using taskSpec.steps", + Paths: []string{"taskSpec.apiVersion"}, + }) + } + if pt.TaskSpec.Kind != "" { + errs = errs.Also(&apis.FieldError{ + Message: "taskSpec.kind cannot be specified when using taskSpec.steps", + Paths: []string{"taskSpec.kind"}, + }) + } + } + return +} + +func (pt *PipelineTask) validateResultsFromMatrixedPipelineTasksNotConsumed(matrixedPipelineTasks sets.String) (errs *apis.FieldError) { + for _, ref := range PipelineTaskResultRefs(pt) { + if matrixedPipelineTasks.Has(ref.PipelineTask) { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("consuming results from matrixed task %s is not allowed", ref.PipelineTask), "")) + } + } + return errs +} + +func (pt *PipelineTask) validateWorkspaces(workspaceNames sets.String) (errs *apis.FieldError) { + workspaceBindingNames := sets.NewString() + for i, ws := range pt.Workspaces { + if workspaceBindingNames.Has(ws.Name) { + errs = errs.Also(apis.ErrGeneric( + fmt.Sprintf("workspace name %q must be unique", ws.Name), "").ViaFieldIndex("workspaces", i)) + } + + if ws.Workspace == "" { + if !workspaceNames.Has(ws.Name) { + errs = errs.Also(apis.ErrInvalidValue( + fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Name), + "", + ).ViaFieldIndex("workspaces", i)) + } + } else if !workspaceNames.Has(ws.Workspace) { + errs = errs.Also(apis.ErrInvalidValue( + fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Workspace), + "", + ).ViaFieldIndex("workspaces", i)) + } + + workspaceBindingNames.Insert(ws.Name) + } + return errs +} + +// validateRefOrSpec validates at least one of taskRef or taskSpec is specified +func (pt PipelineTask) validateRefOrSpec() (errs *apis.FieldError) { + // can't have both taskRef and taskSpec at the same time + if pt.TaskRef != nil && pt.TaskSpec != nil { + errs = errs.Also(apis.ErrMultipleOneOf("taskRef", "taskSpec")) + } + // Check that one of TaskRef and TaskSpec is present + if pt.TaskRef == nil && pt.TaskSpec == nil { + errs = errs.Also(apis.ErrMissingOneOf("taskRef", "taskSpec")) + } + return errs +} + +// validateCustomTask validates custom task specifications - checking kind and fail if not yet supported features specified +func (pt PipelineTask) validateCustomTask() (errs *apis.FieldError) { + if pt.TaskRef != nil && pt.TaskRef.Kind == "" { + errs = errs.Also(apis.ErrInvalidValue("custom task ref must specify kind", "taskRef.kind")) + } + if pt.TaskSpec != nil && pt.TaskSpec.Kind == "" { + errs = errs.Also(apis.ErrInvalidValue("custom task spec must specify kind", "taskSpec.kind")) + } + if pt.TaskRef != nil && pt.TaskRef.APIVersion == "" { + errs = errs.Also(apis.ErrInvalidValue("custom task ref must specify apiVersion", 
"taskRef.apiVersion")) + } + if pt.TaskSpec != nil && pt.TaskSpec.APIVersion == "" { + errs = errs.Also(apis.ErrInvalidValue("custom task spec must specify apiVersion", "taskSpec.apiVersion")) + } + return errs +} + +// validateTask validates a pipeline task or a final task for taskRef and taskSpec +func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) { + cfg := config.FromContextOrDefaults(ctx) + // Validate TaskSpec if it's present + if pt.TaskSpec != nil { + errs = errs.Also(pt.TaskSpec.Validate(ctx).ViaField("taskSpec")) + } + if pt.TaskRef != nil { + if pt.TaskRef.Name != "" { + // TaskRef name must be a valid k8s name + if errSlice := validation.IsQualifiedName(pt.TaskRef.Name); len(errSlice) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name")) + } + } else if pt.TaskRef.Resolver == "" { + errs = errs.Also(apis.ErrInvalidValue("taskRef must specify name", "taskRef.name")) + } + if cfg.FeatureFlags.EnableAPIFields != config.BetaAPIFields && cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { + // fail if resolver or resource are present when enable-api-fields is false. + if pt.TaskRef.Resolver != "" { + errs = errs.Also(apis.ErrDisallowedFields("taskref.resolver")) + } + if len(pt.TaskRef.Params) > 0 { + errs = errs.Also(apis.ErrDisallowedFields("taskref.params")) + } + } + } + return errs +} + // validatePipelineWorkspacesDeclarations validates the specified workspaces, ensuring having unique name without any // empty string, func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration) (errs *apis.FieldError) { @@ -162,7 +339,7 @@ func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, pa for idx, task := range tasks { errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) if task.IsMatrixed() { - errs = errs.Also(validatePipelineParametersVariablesInMatrixParameters(task.Matrix.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) + errs = errs.Also(task.Matrix.validatePipelineParametersVariablesInMatrixParameters(prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) } errs = errs.Also(task.When.validatePipelineParametersVariables(prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) } @@ -183,14 +360,7 @@ func validatePipelineContextVariables(tasks []PipelineTask) *apis.FieldError { ) var paramValues []string for _, task := range tasks { - var matrixParams []Param - if task.IsMatrixed() { - matrixParams = task.Matrix.Params - } - for _, param := range append(task.Params, matrixParams...) { - paramValues = append(paramValues, param.Value.StringVal) - paramValues = append(paramValues, param.Value.ArrayVal...) - } + paramValues = task.extractAllParams().extractValues() } errs := validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipelineRun", pipelineRunContextNames). Also(validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipeline", pipelineContextNames)). @@ -198,6 +368,23 @@ func validatePipelineContextVariables(tasks []PipelineTask) *apis.FieldError { return errs } +// extractAllParams extracts all the parameters in a PipelineTask: +// - pt.Params +// - pt.Matrix.Params +// - pt.Matrix.Include.Params +func (pt *PipelineTask) extractAllParams() Params { + allParams := pt.Params + if pt.Matrix.HasParams() { + allParams = append(allParams, pt.Matrix.Params...) 
+ } + if pt.Matrix.HasInclude() { + for _, include := range pt.Matrix.Include { + allParams = append(allParams, include.Params...) + } + } + return allParams +} + func containsExecutionStatusRef(p string) bool { if strings.HasPrefix(p, "tasks.") && strings.HasSuffix(p, ".status") { return true @@ -205,6 +392,12 @@ func containsExecutionStatusRef(p string) bool { return false } +func validateExecutionStatusVariables(tasks []PipelineTask, finallyTasks []PipelineTask) (errs *apis.FieldError) { + errs = errs.Also(validateExecutionStatusVariablesInTasks(tasks).ViaField("tasks")) + errs = errs.Also(validateExecutionStatusVariablesInFinally(PipelineTaskList(tasks).Names(), finallyTasks).ViaField("finally")) + return errs +} + // validate dag pipeline tasks, task params can not access execution status of any other task // dag tasks cannot have param value as $(tasks.pipelineTask.status) func validateExecutionStatusVariablesInTasks(tasks []PipelineTask) (errs *apis.FieldError) { @@ -223,12 +416,81 @@ func validateExecutionStatusVariablesInFinally(tasksNames sets.String, finally [ return errs } -func validateExecutionStatusVariables(tasks []PipelineTask, finallyTasks []PipelineTask) (errs *apis.FieldError) { - errs = errs.Also(validateExecutionStatusVariablesInTasks(tasks).ViaField("tasks")) - errs = errs.Also(validateExecutionStatusVariablesInFinally(PipelineTaskList(tasks).Names(), finallyTasks).ViaField("finally")) +func (pt *PipelineTask) validateExecutionStatusVariablesDisallowed() (errs *apis.FieldError) { + for _, param := range pt.Params { + if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok { + errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, "value"). + ViaFieldKey("params", param.Name)) + } + } + for i, we := range pt.When { + if expressions, ok := we.GetVarSubstitutionExpressions(); ok { + errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, ""). + ViaFieldIndex("when", i)) + } + } + return errs +} + +func (pt *PipelineTask) validateExecutionStatusVariablesAllowed(ptNames sets.String) (errs *apis.FieldError) { + for _, param := range pt.Params { + if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok { + errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, "value"). + ViaFieldKey("params", param.Name)) + } + } + for i, we := range pt.When { + if expressions, ok := we.GetVarSubstitutionExpressions(); ok { + errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, ""). 
+ ViaFieldIndex("when", i)) + } + } + return errs +} + +func validateContainsExecutionStatusVariablesDisallowed(expressions []string, path string) (errs *apis.FieldError) { + if containsExecutionStatusReferences(expressions) { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline tasks can not refer to execution status"+ + " of any other pipeline task or aggregate status of tasks"), path)) + } return errs } +func containsExecutionStatusReferences(expressions []string) bool { + // validate tasks.pipelineTask.status/tasks.status if this expression is not a result reference + if !LooksLikeContainsResultRefs(expressions) { + for _, e := range expressions { + // check if it contains context variable accessing execution status - $(tasks.taskname.status) + // or an aggregate status - $(tasks.status) + if containsExecutionStatusRef(e) { + return true + } + } + } + return false +} + +func validateExecutionStatusVariablesExpressions(expressions []string, ptNames sets.String, fieldPath string) (errs *apis.FieldError) { + // validate tasks.pipelineTask.status if this expression is not a result reference + if !LooksLikeContainsResultRefs(expressions) { + for _, expression := range expressions { + // its a reference to aggregate status of dag tasks - $(tasks.status) + if expression == PipelineTasksAggregateStatus { + continue + } + // check if it contains context variable accessing execution status - $(tasks.taskname.status) + if containsExecutionStatusRef(expression) { + // strip tasks. and .status from tasks.taskname.status to further verify task name + pt := strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".status") + // report an error if the task name does not exist in the list of dag tasks + if !ptNames.Has(pt) { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline task %s is not defined in the pipeline", pt), fieldPath)) + } + } + } + } + return errs +} func validatePipelineContextVariablesInParamValues(paramValues []string, prefix string, contextNames sets.String) (errs *apis.FieldError) { for _, paramValue := range paramValues { errs = errs.Also(substitution.ValidateVariableP(paramValue, prefix, contextNames).ViaField("value")) @@ -412,3 +674,51 @@ func validateResultsFromMatrixedPipelineTasksNotConsumed(tasks []PipelineTask, f } return errs } + +// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. +// error is returned when the array indexing reference is out of bound of the array param +// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. +func (ps *PipelineSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { + if !config.CheckAlphaOrBetaAPIFields(ctx) { + return nil + } + + // Collect all array params lengths + arrayParamsLengths := ps.Params.extractParamArrayLengths() + for k, v := range params.extractParamArrayLengths() { + arrayParamsLengths[k] = v + } + + paramsRefs := []string{} + for i := range ps.Tasks { + paramsRefs = append(paramsRefs, ps.Tasks[i].Params.extractValues()...) + if ps.Tasks[i].IsMatrixed() { + paramsRefs = append(paramsRefs, ps.Tasks[i].Matrix.Params.extractValues()...) + } + for j := range ps.Tasks[i].Workspaces { + paramsRefs = append(paramsRefs, ps.Tasks[i].Workspaces[j].SubPath) + } + for _, wes := range ps.Tasks[i].When { + paramsRefs = append(paramsRefs, wes.Input) + paramsRefs = append(paramsRefs, wes.Values...) + } + } + + for i := range ps.Finally { + paramsRefs = append(paramsRefs, ps.Finally[i].Params.extractValues()...) 
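// Illustrative sketch, not part of the vendored change: the references being
// collected here are checked against the declared array lengths. With a
// hypothetical declaration
//
//	ps.Params = ParamSpecs{{Name: "array-param", Type: ParamTypeArray,
//		Default: &ParamValue{Type: ParamTypeArray, ArrayVal: []string{"a", "b"}}}}
//
// an indexing reference such as "$(params.array-param[2])" in any task param,
// workspace subPath or when expression is rejected as out of bounds, since the
// only valid indices are 0 and 1.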
+ if ps.Finally[i].IsMatrixed() { + paramsRefs = append(paramsRefs, ps.Finally[i].Matrix.Params.extractValues()...) + } + for _, wes := range ps.Finally[i].When { + paramsRefs = append(paramsRefs, wes.Values...) + } + } + + // extract all array indexing references, for example []{"$(params.array-params[1])"} + arrayIndexParamRefs := []string{} + for _, p := range paramsRefs { + arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) + } + + return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelineref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelineref_validation.go index e68b775115..ee91841717 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelineref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelineref_validation.go @@ -39,7 +39,7 @@ func (ref *PipelineRef) Validate(ctx context.Context) (errs *apis.FieldError) { } } if ref.Params != nil { - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "params", config.BetaAPIFields).ViaField("params")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params")) if ref.Name != "" { errs = errs.Also(apis.ErrMultipleOneOf("name", "params")) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go index d386021959..e53efe5e5a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go @@ -36,8 +36,15 @@ func (pr *PipelineRun) SetDefaults(ctx context.Context) { // SetDefaults implements apis.Defaultable func (prs *PipelineRunSpec) SetDefaults(ctx context.Context) { cfg := config.FromContextOrDefaults(ctx) + if prs.PipelineRef != nil && prs.PipelineRef.Name == "" && prs.PipelineRef.Resolver == "" { + prs.PipelineRef.Resolver = ResolverName(cfg.Defaults.DefaultResolverType) + } + + if prs.Timeouts == nil { + prs.Timeouts = &TimeoutFields{} + } - if prs.Timeouts != nil && prs.Timeouts.Pipeline == nil { + if prs.Timeouts.Pipeline == nil { prs.Timeouts.Pipeline = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go index 1976caefa2..4f4e33b3a1 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go @@ -20,15 +20,14 @@ import ( "context" "time" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "github.com/tektoncd/pipeline/pkg/apis/config" apisconfig "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" runv1beta1 "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/clock" @@ -224,7 +223,7 @@ type PipelineRunSpec struct { PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"` // Params is a list of 
parameter names and values. // +listType=atomic - Params []Param `json:"params,omitempty"` + Params Params `json:"params,omitempty"` // Used for cancelling a pipelinerun (and maybe more later on) // +optional @@ -429,6 +428,9 @@ type PipelineRunStatusFields struct { // Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.). // +optional Provenance *Provenance `json:"provenance,omitempty"` + + // SpanContext contains tracing span context fields + SpanContext map[string]string `json:"spanContext,omitempty"` } // SkippedTask is used to describe the Tasks that were skipped due to their When Expressions @@ -467,6 +469,8 @@ const ( TasksTimedOutSkip SkippingReason = "PipelineRun Tasks timeout has been reached" // FinallyTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Finally. FinallyTimedOutSkip SkippingReason = "PipelineRun Finally timeout has been reached" + // EmptyArrayInMatrixParams means the task was skipped because Matrix parameters contain empty array. + EmptyArrayInMatrixParams SkippingReason = "Matrix Parameters have an empty array" // None means the task was not skipped None SkippingReason = "None" ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go index 76d3417cc0..d5e8361f8c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go @@ -190,7 +190,7 @@ func appendParamSpec(paramSpec []ParamSpec, params []ParamSpec) []ParamSpec { return paramSpec } -func appendParam(paramSpec []ParamSpec, params []Param) []ParamSpec { +func appendParam(paramSpec []ParamSpec, params Params) []ParamSpec { for _, p := range params { skip := false for _, ps := range paramSpec { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/provenance.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/provenance.go index 2539f97bda..de9f2a5c5d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/provenance.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/provenance.go @@ -15,37 +15,32 @@ package v1 import "github.com/tektoncd/pipeline/pkg/apis/config" -// Provenance contains some key authenticated metadata about how a software artifact was -// built (what sources, what inputs/outputs, etc.). For now, it only contains the subfield -// `ConfigSource` that identifies the source where a build config file came from. -// In future, it can be expanded as needed to include more metadata about the build. -// This field aims to be used to carry minimum amount of the authenticated metadata in *Run status -// so that Tekton Chains can pick it up and record in the provenance it generates. +// Provenance contains metadata about resources used in the TaskRun/PipelineRun +// such as the source from where a remote build definition was fetched. +// This field aims to carry minimum amoumt of metadata in *Run status so that +// Tekton Chains can capture them in the provenance. type Provenance struct { - // ConfigSource identifies the source where a resource came from. - ConfigSource *ConfigSource `json:"configSource,omitempty"` + // RefSource identifies the source where a remote task/pipeline came from. 
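// Illustrative sketch, not part of the vendored change: using the example
// values given in the field comments below, a RefSource recorded for a
// pipeline fetched from a remote location might look like:
//
//	RefSource{
//		URI:        "https://github.com/tektoncd/catalog",
//		Digest:     map[string]string{"sha1": "f99d13e554ffcb696dee719fa85b695cb5b0f428"},
//		EntryPoint: "task/git-clone/0.8/git-clone.yaml",
//	}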
+ RefSource *RefSource `json:"refSource,omitempty"` // FeatureFlags identifies the feature flags that were used during the task/pipeline run FeatureFlags *config.FeatureFlags `json:"featureFlags,omitempty"` } -// ConfigSource identifies the source where a resource came from. -// This can include Git repositories, Task Bundles, file checksums, or other information -// that allows users to identify where the resource came from and what version was used. -type ConfigSource struct { - // URI indicates the identity of the source of the config. - // Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.uri +// RefSource contains the information that can uniquely identify where a remote +// built definition came from i.e. Git repositories, Tekton Bundles in OCI registry +// and hub. +type RefSource struct { + // URI indicates the identity of the source of the build definition. // Example: "https://github.com/tektoncd/catalog" URI string `json:"uri,omitempty"` // Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. - // Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.digest // Example: {"sha1": "f99d13e554ffcb696dee719fa85b695cb5b0f428"} Digest map[string]string `json:"digest,omitempty"` // EntryPoint identifies the entry point into the build. This is often a path to a - // configuration file and/or a target label within that file. - // Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.entryPoint + // build definition file and/or a target label within that file. // Example: "task/git-clone/0.8/git-clone.yaml" EntryPoint string `json:"entryPoint,omitempty"` } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go index c27b0decfc..095a9d00a2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go @@ -34,5 +34,5 @@ type ResolverRef struct { // the chosen resolver. // +optional // +listType=atomic - Params []Param `json:"params,omitempty"` + Params Params `json:"params,omitempty"` } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_types.go index a64c867f22..3a5b97d919 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_types.go @@ -54,7 +54,7 @@ type ResultValue = ParamValue // ResultsType indicates the type of a result; // Used to distinguish between a single string and an array of strings. // Note that there is ResultType used to find out whether a -// PipelineResourceResult is from a task result or not, which is different from +// RunResult is from a task result or not, which is different from // this ResultsType. 
type ResultsType string diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go index cdeca07cef..1fd9ddd6b1 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go @@ -35,10 +35,10 @@ func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { } switch { - // Object are alpha features + // Object results is beta feature - check if the feature flag is set to "beta" or "alpha" case tr.Type == ResultsTypeObject: errs := validateObjectResult(tr) - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.AlphaAPIFields)) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.BetaAPIFields)) return errs // Array results is a beta feature - check if the feature flag is set to "beta" or "alpha" case tr.Type == ResultsTypeArray: diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go index 8fcc1b9f5a..1dcd06e6e2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go @@ -200,19 +200,13 @@ func ParseResultName(resultName string) (string, string) { // in a PipelineTask and returns a list of any references that are found. func PipelineTaskResultRefs(pt *PipelineTask) []*ResultRef { refs := []*ResultRef{} - var matrixParams []Param - if pt.IsMatrixed() { - matrixParams = pt.Matrix.Params - } - for _, p := range append(pt.Params, matrixParams...) { + for _, p := range pt.extractAllParams() { expressions, _ := GetVarSubstitutionExpressionsForParam(p) refs = append(refs, NewResultRefs(expressions)...) } - for _, whenExpression := range pt.When { expressions, _ := whenExpression.GetVarSubstitutionExpressions() refs = append(refs, NewResultRefs(expressions)...) } - return refs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json index a67dddcf0e..f7b05c14f2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json @@ -180,28 +180,6 @@ } } }, - "v1.ConfigSource": { - "description": "ConfigSource identifies the source where a resource came from. This can include Git repositories, Task Bundles, file checksums, or other information that allows users to identify where the resource came from and what version was used.", - "type": "object", - "properties": { - "digest": { - "description": "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.digest Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}", - "type": "object", - "additionalProperties": { - "type": "string", - "default": "" - } - }, - "entryPoint": { - "description": "EntryPoint identifies the entry point into the build. This is often a path to a configuration file and/or a target label within that file. 
Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.entryPoint Example: \"task/git-clone/0.8/git-clone.yaml\"", - "type": "string" - }, - "uri": { - "description": "URI indicates the identity of the source of the config. Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.uri Example: \"https://github.com/tektoncd/catalog\"", - "type": "string" - } - } - }, "v1.EmbeddedTask": { "description": "EmbeddedTask is used to define a Task inline within a Pipeline's PipelineTasks.", "type": "object", @@ -213,6 +191,10 @@ "description": "Description is a user-facing description of the task that may be used to populate a UI.", "type": "string" }, + "displayName": { + "description": "DisplayName is a user-facing name of the task that may be used to populate a UI.", + "type": "string" + }, "kind": { "type": "string" }, @@ -285,10 +267,38 @@ } } }, + "v1.IncludeParams": { + "description": "IncludeParams allows passing in a specific combinations of Parameters into the Matrix.", + "type": "object", + "properties": { + "name": { + "description": "Name the specified combination", + "type": "string" + }, + "params": { + "description": "Params takes only `Parameters` of type `\"string\"` The names of the `params` must match the names of the `params` in the underlying `Task`", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Param" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, "v1.Matrix": { "description": "Matrix is used to fan out Tasks in a Pipeline", "type": "object", "properties": { + "include": { + "description": "Include is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.IncludeParams" + }, + "x-kubernetes-list-type": "atomic" + }, "params": { "description": "Params is a list of parameters used to fan out the pipelineTask Params takes only `Parameters` of type `\"array\"` Each array element is supplied to the `PipelineTask` by substituting `params` of type `\"string\"` in the underlying `Task`. 
The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting.", "type": "array", @@ -356,13 +366,13 @@ "description": "ResultValue is a type alias of ParamValue", "type": "object", "required": [ - "type", - "stringVal", - "arrayVal", - "objectVal" + "Type", + "StringVal", + "ArrayVal", + "ObjectVal" ], "properties": { - "arrayVal": { + "ArrayVal": { "type": "array", "items": { "type": "string", @@ -370,19 +380,19 @@ }, "x-kubernetes-list-type": "atomic" }, - "objectVal": { + "ObjectVal": { "type": "object", "additionalProperties": { "type": "string", "default": "" } }, - "stringVal": { + "StringVal": { "description": "Represents the stored type of ParamValues.", "type": "string", "default": "" }, - "type": { + "Type": { "type": "string", "default": "" } @@ -698,6 +708,14 @@ }, "x-kubernetes-list-type": "atomic" }, + "spanContext": { + "description": "SpanContext contains tracing span context fields", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, "startTime": { "description": "StartTime is the time the PipelineRun is actually started.", "$ref": "#/definitions/v1.Time" @@ -751,6 +769,14 @@ }, "x-kubernetes-list-type": "atomic" }, + "spanContext": { + "description": "SpanContext contains tracing span context fields", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, "startTime": { "description": "StartTime is the time the PipelineRun is actually started.", "$ref": "#/definitions/v1.Time" @@ -788,6 +814,10 @@ "description": "Description is a user-facing description of the pipeline that may be used to populate a UI.", "type": "string" }, + "displayName": { + "description": "DisplayName is a user-facing name of the pipeline that may be used to populate a UI.", + "type": "string" + }, "finally": { "description": "Finally declares the list of Tasks that execute just before leaving the Pipeline i.e. either after all Tasks are finished executing successfully or after a failure which would result in ending the Pipeline", "type": "array", @@ -839,6 +869,14 @@ "description": "PipelineTask defines a task in a Pipeline, passing inputs from both Params and from the output of previous tasks.", "type": "object", "properties": { + "description": { + "description": "Description is the description of this task within the context of a Pipeline. This description may be used to populate a UI.", + "type": "string" + }, + "displayName": { + "description": "DisplayName is the display name of this task within the context of a Pipeline. This display name may be used to populate a UI.", + "type": "string" + }, "matrix": { "description": "Matrix declares parameters used to fan out this task.", "$ref": "#/definitions/v1.Matrix" @@ -999,7 +1037,7 @@ } }, "v1.PipelineWorkspaceDeclaration": { - "description": "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", + "description": "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding.\n\nDeprecated: use PipelineWorkspaceDeclaration type instead", "type": "object", "required": [ "name" @@ -1030,16 +1068,38 @@ } }, "v1.Provenance": { - "description": "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.). 
For now, it only contains the subfield `ConfigSource` that identifies the source where a build config file came from. In future, it can be expanded as needed to include more metadata about the build. This field aims to be used to carry minimum amount of the authenticated metadata in *Run status so that Tekton Chains can pick it up and record in the provenance it generates.", + "description": "Provenance contains metadata about resources used in the TaskRun/PipelineRun such as the source from where a remote build definition was fetched. This field aims to carry minimum amoumt of metadata in *Run status so that Tekton Chains can capture them in the provenance.", "type": "object", "properties": { - "configSource": { - "description": "ConfigSource identifies the source where a resource came from.", - "$ref": "#/definitions/v1.ConfigSource" - }, "featureFlags": { "description": "FeatureFlags identifies the feature flags that were used during the task/pipeline run", "$ref": "#/definitions/github.com.tektoncd.pipeline.pkg.apis.config.FeatureFlags" + }, + "refSource": { + "description": "RefSource identifies the source where a remote task/pipeline came from.", + "$ref": "#/definitions/v1.RefSource" + } + } + }, + "v1.RefSource": { + "description": "RefSource contains the information that can uniquely identify where a remote built definition came from i.e. Git repositories, Tekton Bundles in OCI registry and hub.", + "type": "object", + "properties": { + "digest": { + "description": "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "entryPoint": { + "description": "EntryPoint identifies the entry point into the build. This is often a path to a build definition file and/or a target label within that file. Example: \"task/git-clone/0.8/git-clone.yaml\"", + "type": "string" + }, + "uri": { + "description": "URI indicates the identity of the source of the build definition. Example: \"https://github.com/tektoncd/catalog\"", + "type": "string" } } }, @@ -1610,11 +1670,11 @@ "type": "object", "properties": { "apiVersion": { - "description": "API version of the referent", + "description": "API version of the referent Note: A Task with non-empty APIVersion and Kind is considered a Custom Task", "type": "string" }, "kind": { - "description": "TaskKind indicates the kind of the task, namespaced or cluster scoped.", + "description": "TaskKind indicates the Kind of the Task: 1. Namespaced Task when Kind is set to \"Task\". If Kind is \"\", it defaults to \"Task\". 2. 
Custom Task when Kind is non-empty and APIVersion is non-empty", "type": "string" }, "name": { @@ -1929,6 +1989,14 @@ }, "x-kubernetes-list-type": "atomic" }, + "spanContext": { + "description": "SpanContext contains tracing span context fields", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, "startTime": { "description": "StartTime is the time the build is actually started.", "$ref": "#/definitions/v1.Time" @@ -1995,6 +2063,14 @@ }, "x-kubernetes-list-type": "atomic" }, + "spanContext": { + "description": "SpanContext contains tracing span context fields", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, "startTime": { "description": "StartTime is the time the build is actually started.", "$ref": "#/definitions/v1.Time" @@ -2042,6 +2118,10 @@ "description": "Description is a user-facing description of the task that may be used to populate a UI.", "type": "string" }, + "displayName": { + "description": "DisplayName is a user-facing name of the task that may be used to populate a UI.", + "type": "string" + }, "params": { "description": "Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value.", "type": "array", diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go index eca16b7a7d..9a46de41b8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go @@ -59,7 +59,12 @@ type TaskSpec struct { // value. // +optional // +listType=atomic - Params []ParamSpec `json:"params,omitempty"` + Params ParamSpecs `json:"params,omitempty"` + + // DisplayName is a user-facing name of the task that may be + // used to populate a UI. + // +optional + DisplayName string `json:"displayName,omitempty"` // Description is a user-facing description of the task that may be // used to populate a UI. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go index 5982fa33c7..5848053bf6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go @@ -295,9 +295,9 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) { for _, p := range params { if p.Type == ParamTypeObject { - // Object type parameter is an alpha feature and will fail validation if it's used in a task spec - // when the enable-api-fields feature gate is not "alpha". - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + // Object type parameter is a beta feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha" or "beta". 
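// Illustrative sketch, not part of the vendored change: a hypothetical object
// parameter that passes this check once enable-api-fields is "beta" or "alpha":
//
//	ParamSpec{
//		Name: "repo",
//		Type: ParamTypeObject,
//		Properties: map[string]PropertySpec{
//			"url":    {Type: ParamTypeString},
//			"branch": {Type: ParamTypeString},
//		},
//	}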
+ errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) } errs = errs.Also(p.ValidateType(ctx)) } @@ -381,6 +381,8 @@ func ValidateParameterVariables(ctx context.Context, steps []Step, params []Para arrayParameterNames.Insert(p.Name) case ParamTypeObject: objectParamSpecs = append(objectParamSpecs, p) + case ParamTypeString: + fallthrough default: stringParameterNames.Insert(p.Name) } @@ -600,3 +602,39 @@ func validateTaskArraysIsolated(value, prefix string, arrayNames sets.String) *a func isParamRefs(s string) bool { return strings.HasPrefix(s, "$("+ParamsPrefix) } + +// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. +// error is returned when the array indexing reference is out of bound of the array param +// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. +// - `trParams` are params from taskrun. +// - `taskSpec` contains params declarations. +func (ts *TaskSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { + cfg := config.FromContextOrDefaults(ctx) + if cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { + return nil + } + + // Collect all array params lengths + arrayParamsLengths := ts.Params.extractParamArrayLengths() + for k, v := range params.extractParamArrayLengths() { + arrayParamsLengths[k] = v + } + + // collect all the possible places to use param references + paramsRefs := []string{} + paramsRefs = append(paramsRefs, extractParamRefsFromSteps(ts.Steps)...) + paramsRefs = append(paramsRefs, extractParamRefsFromStepTemplate(ts.StepTemplate)...) + paramsRefs = append(paramsRefs, extractParamRefsFromVolumes(ts.Volumes)...) + for _, v := range ts.Workspaces { + paramsRefs = append(paramsRefs, v.MountPath) + } + paramsRefs = append(paramsRefs, extractParamRefsFromSidecars(ts.Sidecars)...) + + // extract all array indexing references, for example []{"$(params.array-params[1])"} + arrayIndexParamRefs := []string{} + for _, p := range paramsRefs { + arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) + } + + return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go index 74a319dd71..2bb395dac2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go @@ -20,9 +20,12 @@ package v1 type TaskRef struct { // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name,omitempty"` - // TaskKind indicates the kind of the task, namespaced or cluster scoped. + // TaskKind indicates the Kind of the Task: + // 1. Namespaced Task when Kind is set to "Task". If Kind is "", it defaults to "Task". + // 2. Custom Task when Kind is non-empty and APIVersion is non-empty Kind TaskKind `json:"kind,omitempty"` // API version of the referent + // Note: A Task with non-empty APIVersion and Kind is considered a Custom Task // +optional APIVersion string `json:"apiVersion,omitempty"` @@ -40,3 +43,10 @@ const ( // NamespacedTaskKind indicates that the task type has a namespaced scope. 
NamespacedTaskKind TaskKind = "Task" ) + +// IsCustomTask checks whether the reference is to a Custom Task +func (tr *TaskRef) IsCustomTask() bool { + // Note that if `apiVersion` is set to `"tekton.dev/v1beta1"` and `kind` is set to `"Task"`, + // the reference will be considered a Custom Task - https://github.com/tektoncd/pipeline/issues/6457 + return tr != nil && tr.APIVersion != "" && tr.Kind != "" +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go index 9de4940517..02dca53018 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go @@ -39,7 +39,7 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { } } if ref.Params != nil { - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "params", config.BetaAPIFields).ViaField("params")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params")) if ref.Name != "" { errs = errs.Also(apis.ErrMultipleOneOf("name", "params")) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go index 61932f4668..9f34e6b5c7 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go @@ -50,8 +50,13 @@ func (tr *TaskRun) SetDefaults(ctx context.Context) { // SetDefaults implements apis.Defaultable func (trs *TaskRunSpec) SetDefaults(ctx context.Context) { cfg := config.FromContextOrDefaults(ctx) - if trs.TaskRef != nil && trs.TaskRef.Kind == "" { - trs.TaskRef.Kind = NamespacedTaskKind + if trs.TaskRef != nil { + if trs.TaskRef.Kind == "" { + trs.TaskRef.Kind = NamespacedTaskKind + } + if trs.TaskRef.Name == "" && trs.TaskRef.Resolver == "" { + trs.TaskRef.Resolver = ResolverName(cfg.Defaults.DefaultResolverType) + } } if trs.Timeout == nil { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go index 9bf19b81de..7375c01ff5 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go @@ -37,7 +37,7 @@ type TaskRunSpec struct { Debug *TaskRunDebug `json:"debug,omitempty"` // +optional // +listType=atomic - Params []Param `json:"params,omitempty"` + Params Params `json:"params,omitempty"` // +optional ServiceAccountName string `json:"serviceAccountName"` // no more than one of the TaskRef and TaskSpec may be specified. @@ -114,7 +114,7 @@ type TaskRunDebug struct { type TaskRunInputs struct { // +optional // +listType=atomic - Params []Param `json:"params,omitempty"` + Params Params `json:"params,omitempty"` } var taskRunCondSet = apis.NewBatchConditionSet() @@ -239,6 +239,9 @@ type TaskRunStatusFields struct { // Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.). 
// +optional Provenance *Provenance `json:"provenance,omitempty"` + + // SpanContext contains tracing span context fields + SpanContext map[string]string `json:"spanContext,omitempty"` } // TaskRunStepSpec is used to override the values of a Step in the corresponding Task. @@ -412,7 +415,7 @@ func (tr *TaskRun) GetTimeout(ctx context.Context) time.Duration { // Use the platform default is no timeout is set if tr.Spec.Timeout == nil { defaultTimeout := time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) - return defaultTimeout * time.Minute + return defaultTimeout * time.Minute //nolint:durationcheck } return tr.Spec.Timeout.Duration } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go index cc2e54054d..055094bb6a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go @@ -238,13 +238,13 @@ func ValidateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs } // ValidateParameters makes sure the params for the Task are valid. -func ValidateParameters(ctx context.Context, params []Param) (errs *apis.FieldError) { +func ValidateParameters(ctx context.Context, params Params) (errs *apis.FieldError) { var names []string for _, p := range params { if p.Value.Type == ParamTypeObject { - // Object type parameter is an alpha feature and will fail validation if it's used in a taskrun spec - // when the enable-api-fields feature gate is not "alpha". - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + // Object type parameter is a beta feature and will fail validation if it's used in a taskrun spec + // when the enable-api-fields feature gate is not "alpha" or "beta". + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) } names = append(names, p.Name) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go index a68c3064eb..f556201c4b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go @@ -87,6 +87,7 @@ type WorkspaceBinding struct { // WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun // is expected to populate with a workspace binding. +// // Deprecated: use PipelineWorkspaceDeclaration type instead type WorkspacePipelineDeclaration = PipelineWorkspaceDeclaration diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go index c1ed3b4849..bd2f039f69 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go @@ -55,26 +55,53 @@ func (in *ChildStatusReference) DeepCopy() *ChildStatusReference { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ConfigSource) DeepCopyInto(out *ConfigSource) { - *out = *in - if in.Digest != nil { - in, out := &in.Digest, &out.Digest - *out = make(map[string]string, len(*in)) +func (in Combination) DeepCopyInto(out *Combination) { + { + in := &in + *out = make(Combination, len(*in)) for key, val := range *in { (*out)[key] = val } + return } - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSource. -func (in *ConfigSource) DeepCopy() *ConfigSource { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Combination. +func (in Combination) DeepCopy() Combination { if in == nil { return nil } - out := new(ConfigSource) + out := new(Combination) in.DeepCopyInto(out) - return out + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Combinations) DeepCopyInto(out *Combinations) { + { + in := &in + *out = make(Combinations, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(Combination, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Combinations. +func (in Combinations) DeepCopy() Combinations { + if in == nil { + return nil + } + out := new(Combinations) + in.DeepCopyInto(out) + return *out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -97,12 +124,64 @@ func (in *EmbeddedTask) DeepCopy() *EmbeddedTask { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludeParams) DeepCopyInto(out *IncludeParams) { + *out = *in + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make(Params, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeParams. +func (in *IncludeParams) DeepCopy() *IncludeParams { + if in == nil { + return nil + } + out := new(IncludeParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in IncludeParamsList) DeepCopyInto(out *IncludeParamsList) { + { + in := &in + *out = make(IncludeParamsList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeParamsList. +func (in IncludeParamsList) DeepCopy() IncludeParamsList { + if in == nil { + return nil + } + out := new(IncludeParamsList) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Matrix) DeepCopyInto(out *Matrix) { *out = *in if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make(IncludeParamsList, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -165,6 +244,28 @@ func (in *ParamSpec) DeepCopy() *ParamSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ParamSpecs) DeepCopyInto(out *ParamSpecs) { + { + in := &in + *out = make(ParamSpecs, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamSpecs. +func (in ParamSpecs) DeepCopy() ParamSpecs { + if in == nil { + return nil + } + out := new(ParamSpecs) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ParamValue) DeepCopyInto(out *ParamValue) { *out = *in @@ -193,6 +294,28 @@ func (in *ParamValue) DeepCopy() *ParamValue { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Params) DeepCopyInto(out *Params) { + { + in := &in + *out = make(Params, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Params. +func (in Params) DeepCopy() Params { + if in == nil { + return nil + } + out := new(Params) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Pipeline) DeepCopyInto(out *Pipeline) { *out = *in @@ -408,7 +531,7 @@ func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { } if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -510,6 +633,13 @@ func (in *PipelineRunStatusFields) DeepCopyInto(out *PipelineRunStatusFields) { *out = new(Provenance) (*in).DeepCopyInto(*out) } + if in.SpanContext != nil { + in, out := &in.SpanContext, &out.SpanContext + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -563,7 +693,7 @@ func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { } if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]ParamSpec, len(*in)) + *out = make(ParamSpecs, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -627,7 +757,7 @@ func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { } if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -845,9 +975,9 @@ func (in *PropertySpec) DeepCopy() *PropertySpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Provenance) DeepCopyInto(out *Provenance) { *out = *in - if in.ConfigSource != nil { - in, out := &in.ConfigSource, &out.ConfigSource - *out = new(ConfigSource) + if in.RefSource != nil { + in, out := &in.RefSource, &out.RefSource + *out = new(RefSource) (*in).DeepCopyInto(*out) } if in.FeatureFlags != nil { @@ -868,12 +998,35 @@ func (in *Provenance) DeepCopy() *Provenance { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RefSource) DeepCopyInto(out *RefSource) { + *out = *in + if in.Digest != nil { + in, out := &in.Digest, &out.Digest + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RefSource. +func (in *RefSource) DeepCopy() *RefSource { + if in == nil { + return nil + } + out := new(RefSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResolverRef) DeepCopyInto(out *ResolverRef) { *out = *in if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1358,7 +1511,7 @@ func (in *TaskRunInputs) DeepCopyInto(out *TaskRunInputs) { *out = *in if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1453,7 +1606,7 @@ func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { } if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1584,6 +1737,13 @@ func (in *TaskRunStatusFields) DeepCopyInto(out *TaskRunStatusFields) { *out = new(Provenance) (*in).DeepCopyInto(*out) } + if in.SpanContext != nil { + in, out := &in.SpanContext, &out.SpanContext + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -1619,7 +1779,7 @@ func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { *out = *in if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]ParamSpec, len(*in)) + *out = make(ParamSpecs, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go index 0806ef6cb1..68198cf44f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go @@ -55,7 +55,7 @@ type RunSpec struct { Spec *EmbeddedRunSpec `json:"spec,omitempty"` // +optional - Params []v1beta1.Param `json:"params,omitempty"` + Params v1beta1.Params `json:"params,omitempty"` // Used for cancelling a run (and maybe more later on) // +optional diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/verificationpolicy_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/verificationpolicy_types.go index c920004c1e..c16483cb8c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/verificationpolicy_types.go +++ 
b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/verificationpolicy_types.go @@ -62,6 +62,11 @@ type VerificationPolicySpec struct { Resources []ResourcePattern `json:"resources"` // Authorities defines the rules for validating signatures. Authorities []Authority `json:"authorities"` + // Mode controls whether a failing policy will fail the taskrun/pipelinerun, or only log the warnings + // enforce - fail the taskrun/pipelinerun if verification fails (default) + // warn - don't fail the taskrun/pipelinerun if verification fails but log warnings + // +optional + Mode ModeType `json:"mode,omitempty"` } // ResourcePattern defines the pattern of the resource source @@ -82,6 +87,15 @@ type Authority struct { Key *KeyRef `json:"key,omitempty"` } +// ModeType indicates the type of a mode for VerificationPolicy +type ModeType string + +// Valid ModeType: +const ( + ModeWarn ModeType = "warn" + ModeEnforce ModeType = "enforce" +) + // KeyRef defines the reference to a public key type KeyRef struct { // SecretRef sets a reference to a secret with the key. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/verificationpolicy_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/verificationpolicy_validation.go index ce3d1a78bc..316ba55da2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/verificationpolicy_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/verificationpolicy_validation.go @@ -55,6 +55,9 @@ func (vs *VerificationPolicySpec) Validate(ctx context.Context) (errs *apis.Fiel errs = errs.Also(a.Key.Validate(ctx).ViaFieldIndex("key", i)) } } + if vs.Mode != "" && vs.Mode != ModeEnforce && vs.Mode != ModeWarn { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("available values are: %s, %s, but got: %s", ModeEnforce, ModeWarn, vs.Mode), "mode")) + } return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go index 9c0c525459..2da3fac462 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go @@ -182,7 +182,7 @@ func (in *RunSpec) DeepCopyInto(out *RunSpec) { } if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]v1beta1.Param, len(*in)) + *out = make(v1beta1.Params, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_types.go index 1a78de2605..aaaf03b9cf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_types.go @@ -31,7 +31,9 @@ import ( // ClusterTask is a Task with a cluster scope. ClusterTasks are used to // represent Tasks that should be publicly addressable from any namespace in the -// cluster. Deprecated: Please use the cluster resolver instead. +// cluster. +// +// Deprecated: Please use the cluster resolver instead. 
type ClusterTask struct { metav1.TypeMeta `json:",inline"` // +optional diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go index 746831bfda..816e4e9918 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package v1beta1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_types.go index 72d738c378..980ad392c8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_types.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package v1beta1 import ( @@ -42,7 +58,6 @@ type Step struct { // Cannot be updated. // +optional WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` - // Deprecated. This field will be removed in a future release. // List of ports to expose from the Step's container. Exposing a port here gives // the system additional information about the network connections a // container uses, but is primarily informational. Not specifying a port here @@ -50,6 +65,9 @@ type Step struct { // listening on the default "0.0.0.0" address inside a container will be // accessible from the network. // Cannot be updated. + // + // Deprecated: This field will be removed in a future release. + // // +optional // +patchMergeKey=containerPort // +patchStrategy=merge @@ -91,21 +109,25 @@ type Step struct { // +optional // +listType=atomic VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` - // Deprecated. This field will be removed in a future release. // Periodic probe of container liveness. // Step will be restarted if the probe fails. // Cannot be updated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // + // Deprecated: This field will be removed in a future release. 
+ // // +optional DeprecatedLivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` - // Deprecated. This field will be removed in a future release. // Periodic probe of container service readiness. // Step will be removed from service endpoints if the probe fails. // Cannot be updated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` - // Deprecated. This field will be removed in a future release. + // DeprecatedStartupProbe indicates that the Pod this Step runs in has successfully initialized. // If specified, no other probes are executed until this completes successfully. // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. @@ -113,17 +135,22 @@ type Step struct { // when it might take a long time to load data or warm a cache, than during steady-state operation. // This cannot be updated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedStartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` - // Deprecated. This field will be removed in a future release. // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedLifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` - // Deprecated. This field will be removed in a future release and can't be meaningfully used. + // Deprecated: This field will be removed in a future release and can't be meaningfully used. // +optional DeprecatedTerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` - // Deprecated. This field will be removed in a future release and can't be meaningfully used. + // Deprecated: This field will be removed in a future release and can't be meaningfully used. // +optional DeprecatedTerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` // Image pull policy. @@ -141,13 +168,14 @@ type Step struct { // Variables for interactive containers, these are deprecated and should not be used. - // Deprecated. This field will be removed in a future release. // Whether this container should allocate a buffer for stdin in the container runtime. If this // is not set, reads from stdin in the container will always result in EOF. // Default is false. + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedStdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` - // Deprecated. This field will be removed in a future release. // Whether the container runtime should close the stdin channel after it has been opened by // a single attach. When stdin is true the stdin stream will remain open across multiple attach // sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the @@ -155,11 +183,16 @@ type Step struct { // at which time stdin is closed and remains closed until the container is restarted. If this // flag is false, a container processes that reads from stdin will never receive an EOF. // Default is false + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedStdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` - // Deprecated. This field will be removed in a future release. // Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. // Default is false. + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedTTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` @@ -270,10 +303,12 @@ func (s *Step) SetContainerFields(c corev1.Container) { // StepTemplate is a template for a Step type StepTemplate struct { - // Deprecated. This field will be removed in a future release. // Default name for each Step specified as a DNS_LABEL. // Each Step in a Task must have a unique name. // Cannot be updated. + // + // Deprecated: This field will be removed in a future release. + // DeprecatedName string `json:"name" protobuf:"bytes,1,opt,name=name"` // Default image name to use for each Step. // More info: https://kubernetes.io/docs/concepts/containers/images @@ -309,7 +344,6 @@ type StepTemplate struct { // Cannot be updated. // +optional WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` - // Deprecated. This field will be removed in a future release. // List of ports to expose from the Step's container. Exposing a port here gives // the system additional information about the network connections a // container uses, but is primarily informational. Not specifying a port here @@ -317,6 +351,9 @@ type StepTemplate struct { // listening on the default "0.0.0.0" address inside a container will be // accessible from the network. // Cannot be updated. + // + // Deprecated: This field will be removed in a future release. + // // +optional // +patchMergeKey=containerPort // +patchStrategy=merge @@ -358,21 +395,24 @@ type StepTemplate struct { // +optional // +listType=atomic VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` - // Deprecated. This field will be removed in a future release. // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedLivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` - // Deprecated. This field will be removed in a future release. // Periodic probe of container service readiness. // Container will be removed from service endpoints if the probe fails. // Cannot be updated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` - // Deprecated. This field will be removed in a future release. 
// DeprecatedStartupProbe indicates that the Pod has successfully initialized. // If specified, no other probes are executed until this completes successfully. // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. @@ -380,17 +420,22 @@ type StepTemplate struct { // when it might take a long time to load data or warm a cache, than during steady-state operation. // This cannot be updated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedStartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` - // Deprecated. This field will be removed in a future release. // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedLifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` - // Deprecated. This field will be removed in a future release and cannot be meaningfully used. + // Deprecated: This field will be removed in a future release and cannot be meaningfully used. // +optional DeprecatedTerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` - // Deprecated. This field will be removed in a future release and cannot be meaningfully used. + // Deprecated: This field will be removed in a future release and cannot be meaningfully used. // +optional DeprecatedTerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` // Image pull policy. @@ -408,13 +453,14 @@ type StepTemplate struct { // Variables for interactive containers, these are deprecated and should not be used. - // Deprecated. This field will be removed in a future release. // Whether this Step should allocate a buffer for stdin in the container runtime. If this // is not set, reads from stdin in the Step will always result in EOF. // Default is false. + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedStdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` - // Deprecated. This field will be removed in a future release. // Whether the container runtime should close the stdin channel after it has been opened by // a single attach. When stdin is true the stdin stream will remain open across multiple attach // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the @@ -422,11 +468,16 @@ type StepTemplate struct { // at which time stdin is closed and remains closed until the container is restarted. If this // flag is false, a container processes that reads from stdin will never receive an EOF. // Default is false + // + // Deprecated: This field will be removed in a future release. + // // +optional DeprecatedStdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` - // Deprecated. This field will be removed in a future release. // Whether this Step should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. // Default is false. + // + // Deprecated: This field will be removed in a future release. 
+ // // +optional DeprecatedTTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go index 960602003e..233270037d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go @@ -54,7 +54,7 @@ type CustomRunSpec struct { // +optional // +listType=atomic - Params []Param `json:"params,omitempty"` + Params Params `json:"params,omitempty"` // Used for cancelling a customrun (and maybe more later on) // +optional @@ -194,7 +194,7 @@ func (r *CustomRun) GetStatusCondition() apis.ConditionAccessor { // GetGroupVersionKind implements kmeta.OwnerRefable. func (*CustomRun) GetGroupVersionKind() schema.GroupVersionKind { - return SchemeGroupVersion.WithKind(pipeline.RunControllerName) + return SchemeGroupVersion.WithKind(pipeline.CustomRunControllerName) } // HasPipelineRunOwnerReference returns true of CustomRun has diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go new file mode 100644 index 0000000000..f1c86d4e06 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go @@ -0,0 +1,362 @@ +/* +Copyright 2023 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + "sort" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "golang.org/x/exp/maps" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/strings/slices" + "knative.dev/pkg/apis" +) + +// Matrix is used to fan out Tasks in a Pipeline +type Matrix struct { + // Params is a list of parameters used to fan out the pipelineTask + // Params takes only `Parameters` of type `"array"` + // Each array element is supplied to the `PipelineTask` by substituting `params` of type `"string"` in the underlying `Task`. + // The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting. + // +listType=atomic + Params Params `json:"params,omitempty"` + + // Include is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix. + // +optional + // +listType=atomic + Include IncludeParamsList `json:"include,omitempty"` +} + +// IncludeParamsList is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix. +type IncludeParamsList []IncludeParams + +// IncludeParams allows passing in a specific combinations of Parameters into the Matrix. 
+type IncludeParams struct { + // Name the specified combination + Name string `json:"name,omitempty"` + + // Params takes only `Parameters` of type `"string"` + // The names of the `params` must match the names of the `params` in the underlying `Task` + // +listType=atomic + Params Params `json:"params,omitempty"` +} + +// Combination is a map, mainly defined to hold a single combination from a Matrix with key as param.Name and value as param.Value +type Combination map[string]string + +// Combinations is a Combination list +type Combinations []Combination + +// FanOut returns an list of params that represent combinations +func (m *Matrix) FanOut() []Params { + var combinations, includeCombinations Combinations + includeCombinations = m.getIncludeCombinations() + if m.HasInclude() && !m.HasParams() { + // If there are only Matrix Include Parameters return explicit combinations + return includeCombinations.toParams() + } + // Generate combinations from Matrix Parameters + for _, parameter := range m.Params { + combinations = combinations.fanOutMatrixParams(parameter) + } + combinations.overwriteCombinations(includeCombinations) + combinations = combinations.addNewCombinations(includeCombinations) + return combinations.toParams() +} + +// overwriteCombinations replaces any missing include params in the initial +// matrix params combinations by overwriting the initial combinations with the +// include combinations +func (cs Combinations) overwriteCombinations(ics Combinations) { + for _, paramCombination := range cs { + for _, includeCombination := range ics { + if paramCombination.contains(includeCombination) { + // overwrite the parameter name and value in existing combination + // with the include combination + for name, val := range includeCombination { + paramCombination[name] = val + } + } + } + } +} + +// addNewCombinations creates a new combination for any include parameter +// values that are missing entirely from the initial combinations and +// returns all combinations +func (cs Combinations) addNewCombinations(ics Combinations) Combinations { + for _, includeCombination := range ics { + if cs.shouldAddNewCombination(includeCombination) { + cs = append(cs, includeCombination) + } + } + return cs +} + +// contains returns true if the include parameter name and value exists in combinations +func (c Combination) contains(includeCombination Combination) bool { + for name, val := range includeCombination { + if _, exist := c[name]; exist { + if c[name] != val { + return false + } + } + } + return true +} + +// shouldAddNewCombination returns true if the include parameter name exists but the value is +// missing from combinations +func (cs Combinations) shouldAddNewCombination(includeCombination map[string]string) bool { + if len(includeCombination) == 0 { + return false + } + for _, paramCombination := range cs { + for name, val := range includeCombination { + if _, exist := paramCombination[name]; exist { + if paramCombination[name] == val { + return false + } + } + } + } + return true +} + +// toParams transforms Combinations from a slice of map[string]string to a slice of Params +// such that, these combinations can be directly consumed in creating taskRun/run object +func (cs Combinations) toParams() []Params { + listOfParams := make([]Params, len(cs)) + for i := range cs { + var params Params + combination := cs[i] + order, _ := combination.sortCombination() + for _, key := range order { + params = append(params, Param{ + Name: key, + Value: ParamValue{Type: ParamTypeString, 
StringVal: combination[key]}, + }) + } + listOfParams[i] = params + } + return listOfParams +} + +// fanOutMatrixParams generates new combinations based on Matrix Parameters. +func (cs Combinations) fanOutMatrixParams(param Param) Combinations { + if len(cs) == 0 { + return initializeCombinations(param) + } + return cs.distribute(param) +} + +// getIncludeCombinations generates combinations based on Matrix Include Parameters +func (m *Matrix) getIncludeCombinations() Combinations { + var combinations Combinations + for i := range m.Include { + includeParams := m.Include[i].Params + newCombination := make(Combination) + for _, param := range includeParams { + newCombination[param.Name] = param.Value.StringVal + } + combinations = append(combinations, newCombination) + } + return combinations +} + +// distribute generates a new Combination of Parameters by adding a new Parameter to an existing list of Combinations. +func (cs Combinations) distribute(param Param) Combinations { + var expandedCombinations Combinations + for _, value := range param.Value.ArrayVal { + for _, combination := range cs { + newCombination := make(Combination) + maps.Copy(newCombination, combination) + newCombination[param.Name] = value + _, orderedCombination := newCombination.sortCombination() + expandedCombinations = append(expandedCombinations, orderedCombination) + } + } + return expandedCombinations +} + +// initializeCombinations generates a new Combination based on the first Parameter in the Matrix. +func initializeCombinations(param Param) Combinations { + var combinations Combinations + for _, value := range param.Value.ArrayVal { + combinations = append(combinations, Combination{param.Name: value}) + } + return combinations +} + +// sortCombination sorts the given Combination based on the Parameter names to produce a deterministic ordering +func (c Combination) sortCombination() ([]string, Combination) { + sortedCombination := make(Combination, len(c)) + order := make([]string, 0, len(c)) + for key := range c { + order = append(order, key) + } + sort.Slice(order, func(i, j int) bool { + return order[i] <= order[j] + }) + for _, key := range order { + sortedCombination[key] = c[key] + } + return order, sortedCombination +} + +// CountCombinations returns the count of Combinations of Parameters generated from the Matrix in PipelineTask. 
+func (m *Matrix) CountCombinations() int { + // Iterate over Matrix Parameters and compute count of all generated Combinations + count := m.countGeneratedCombinationsFromParams() + + // Add any additional Combinations generated from Matrix Include Parameters + count += m.countNewCombinationsFromInclude() + + return count +} + +// countGeneratedCombinationsFromParams returns the count of Combinations of Parameters generated from the Matrix +// Parameters +func (m *Matrix) countGeneratedCombinationsFromParams() int { + if !m.HasParams() { + return 0 + } + count := 1 + for _, param := range m.Params { + count *= len(param.Value.ArrayVal) + } + return count +} + +// countNewCombinationsFromInclude returns the count of Combinations of Parameters generated from the Matrix +// Include Parameters +func (m *Matrix) countNewCombinationsFromInclude() int { + if !m.HasInclude() { + return 0 + } + if !m.HasParams() { + return len(m.Include) + } + count := 0 + matrixParamMap := m.Params.extractParamMapArrVals() + for _, include := range m.Include { + for _, param := range include.Params { + if val, exist := matrixParamMap[param.Name]; exist { + // If the Matrix Include param values does not exist, a new Combination will be generated + if !slices.Contains(val, param.Value.StringVal) { + count++ + } else { + break + } + } + } + } + return count +} + +// HasInclude returns true if the Matrix has Include Parameters +func (m *Matrix) HasInclude() bool { + return m != nil && m.Include != nil && len(m.Include) > 0 +} + +// HasParams returns true if the Matrix has Parameters +func (m *Matrix) HasParams() bool { + return m != nil && m.Params != nil && len(m.Params) > 0 +} + +// GetAllParams returns a list of all Matrix Parameters +func (m *Matrix) GetAllParams() Params { + var params Params + if m.HasParams() { + params = append(params, m.Params...) + } + if m.HasInclude() { + for _, include := range m.Include { + params = append(params, include.Params...) + } + } + return params +} + +func (m *Matrix) validateCombinationsCount(ctx context.Context) (errs *apis.FieldError) { + matrixCombinationsCount := m.CountCombinations() + maxMatrixCombinationsCount := config.FromContextOrDefaults(ctx).Defaults.DefaultMaxMatrixCombinationsCount + if matrixCombinationsCount > maxMatrixCombinationsCount { + errs = errs.Also(apis.ErrOutOfBoundsValue(matrixCombinationsCount, 0, maxMatrixCombinationsCount, "matrix")) + } + return errs +} + +// validateParams validates the type of Parameter for Matrix.Params and Matrix.Include.Params +// Matrix.Params must be of type array. Matrix.Include.Params must be of type string. 
+// validateParams also validates Matrix.Params for a unique list of params +// and a unique list of params in each Matrix.Include.Params specification +func (m *Matrix) validateParams() (errs *apis.FieldError) { + if m != nil { + if m.HasInclude() { + for i, include := range m.Include { + errs = errs.Also(include.Params.validateDuplicateParameters().ViaField(fmt.Sprintf("matrix.include[%d].params", i))) + for _, param := range include.Params { + if param.Value.Type != ParamTypeString { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type string only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.include.params", param.Name)) + } + } + } + } + if m.HasParams() { + errs = errs.Also(m.Params.validateDuplicateParameters().ViaField("matrix.params")) + for _, param := range m.Params { + if param.Value.Type != ParamTypeArray { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type array only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.params", param.Name)) + } + } + } + } + return errs +} + +// validatePipelineParametersVariablesInMatrixParameters validates all pipeline parameter variables including Matrix.Params and Matrix.Include.Params +// that may contain the reference(s) to other params to make sure those references are used appropriately. +func (m *Matrix) validatePipelineParametersVariablesInMatrixParameters(prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { + if m.HasInclude() { + for _, include := range m.Include { + for idx, param := range include.Params { + stringElement := param.Value.StringVal + // Matrix Include Params must be of type string + errs = errs.Also(validateStringVariable(stringElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("", idx).ViaField("matrix.include.params", "")) + } + } + } + if m.HasParams() { + for _, param := range m.Params { + for idx, arrayElement := range param.Value.ArrayVal { + // Matrix Params must be of type array + errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", idx).ViaFieldKey("matrix.params", param.Name)) + } + } + } + return errs +} + +func (m *Matrix) validateParameterInOneOfMatrixOrParams(params Params) (errs *apis.FieldError) { + matrixParamNames := m.GetAllParams().ExtractNames() + for _, param := range params { + if matrixParamNames.Has(param.Name) { + errs = errs.Also(apis.ErrMultipleOneOf("matrix["+param.Name+"]", "params["+param.Name+"]")) + } + } + return errs +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go index b8dd731160..df879cd588 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go @@ -43,6 +43,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRunSpec": schema_pkg_apis_pipeline_v1beta1_CustomRunSpec(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedCustomRunSpec": schema_pkg_apis_pipeline_v1beta1_EmbeddedCustomRunSpec(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask": 
schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.IncludeParams": schema_pkg_apis_pipeline_v1beta1_IncludeParams(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.InternalTaskModifier": schema_pkg_apis_pipeline_v1beta1_InternalTaskModifier(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Matrix": schema_pkg_apis_pipeline_v1beta1_Matrix(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param": schema_pkg_apis_pipeline_v1beta1_Param(ref), @@ -54,7 +55,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef": schema_pkg_apis_pipeline_v1beta1_PipelineRef(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceBinding": schema_pkg_apis_pipeline_v1beta1_PipelineResourceBinding(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef": schema_pkg_apis_pipeline_v1beta1_PipelineResourceRef(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceResult": schema_pkg_apis_pipeline_v1beta1_PipelineResourceResult(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResult": schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun": schema_pkg_apis_pipeline_v1beta1_PipelineRun(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunList": schema_pkg_apis_pipeline_v1beta1_PipelineRunList(ref), @@ -76,6 +76,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineWorkspaceDeclaration": schema_pkg_apis_pipeline_v1beta1_PipelineWorkspaceDeclaration(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec": schema_pkg_apis_pipeline_v1beta1_PropertySpec(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance": schema_pkg_apis_pipeline_v1beta1_Provenance(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource": schema_pkg_apis_pipeline_v1beta1_RefSource(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResolverRef": schema_pkg_apis_pipeline_v1beta1_ResolverRef(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResultRef": schema_pkg_apis_pipeline_v1beta1_ResultRef(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar": schema_pkg_apis_pipeline_v1beta1_Sidecar(ref), @@ -116,13 +117,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequestSpec": schema_pkg_apis_resolution_v1beta1_ResolutionRequestSpec(ref), "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequestStatus": schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatus(ref), "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequestStatusFields": schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatusFields(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResource": schema_pkg_apis_resource_v1alpha1_PipelineResource(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceList": schema_pkg_apis_resource_v1alpha1_PipelineResourceList(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceSpec": schema_pkg_apis_resource_v1alpha1_PipelineResourceSpec(ref), - 
"github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceStatus": schema_pkg_apis_resource_v1alpha1_PipelineResourceStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceDeclaration": schema_pkg_apis_resource_v1alpha1_ResourceDeclaration(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceParam": schema_pkg_apis_resource_v1alpha1_ResourceParam(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.SecretParam": schema_pkg_apis_resource_v1alpha1_SecretParam(ref), } } @@ -548,7 +542,7 @@ func schema_pkg_apis_pipeline_v1beta1_ClusterTask(ref common.ReferenceCallback) return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ClusterTask is a Task with a cluster scope. ClusterTasks are used to represent Tasks that should be publicly addressable from any namespace in the cluster. Deprecated: Please use the cluster resolver instead.", + Description: "ClusterTask is a Task with a cluster scope. ClusterTasks are used to represent Tasks that should be publicly addressable from any namespace in the cluster.\n\nDeprecated: Please use the cluster resolver instead.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -639,19 +633,19 @@ func schema_pkg_apis_pipeline_v1beta1_ConfigSource(ref common.ReferenceCallback) return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ConfigSource identifies the source where a resource came from. This can include Git repositories, Task Bundles, file checksums, or other information that allows users to identify where the resource came from and what version was used.", + Description: "ConfigSource contains the information that can uniquely identify where a remote built definition came from i.e. Git repositories, Tekton Bundles in OCI registry and hub.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "uri": { SchemaProps: spec.SchemaProps{ - Description: "URI indicates the identity of the source of the config. Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.uri Example: \"https://github.com/tektoncd/catalog\"", + Description: "URI indicates the identity of the source of the build definition. Example: \"https://github.com/tektoncd/catalog\"", Type: []string{"string"}, Format: "", }, }, "digest": { SchemaProps: spec.SchemaProps{ - Description: "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.digest Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}", + Description: "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}", Type: []string{"object"}, AdditionalProperties: &spec.SchemaOrBool{ Allows: true, @@ -667,7 +661,7 @@ func schema_pkg_apis_pipeline_v1beta1_ConfigSource(ref common.ReferenceCallback) }, "entryPoint": { SchemaProps: spec.SchemaProps{ - Description: "EntryPoint identifies the entry point into the build. This is often a path to a configuration file and/or a target label within that file. Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.entryPoint Example: \"task/git-clone/0.8/git-clone.yaml\"", + Description: "EntryPoint identifies the entry point into the build. This is often a path to a build definition file and/or a target label within that file. 
Example: \"task/git-clone/0.8/git-clone.yaml\"", Type: []string{"string"}, Format: "", }, @@ -945,7 +939,7 @@ func schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref common.ReferenceCallback) }, "resources": { SchemaProps: spec.SchemaProps{ - Description: "Resources is a list input and output resource to run the task Resources are represented in TaskRuns as bindings to instances of PipelineResources.", + Description: "Resources is a list input and output resource to run the task Resources are represented in TaskRuns as bindings to instances of PipelineResources.\n\nDeprecated: Unused, preserved only for backwards compatibility", Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources"), }, }, @@ -968,6 +962,13 @@ func schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref common.ReferenceCallback) }, }, }, + "displayName": { + SchemaProps: spec.SchemaProps{ + Description: "DisplayName is a user-facing name of the task that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, "description": { SchemaProps: spec.SchemaProps{ Description: "Description is a user-facing description of the task that may be used to populate a UI.", @@ -1084,11 +1085,52 @@ func schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref common.ReferenceCallback) } } +func schema_pkg_apis_pipeline_v1beta1_IncludeParams(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "IncludeParams allows passing in a specific combinations of Parameters into the Matrix.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name the specified combination", + Type: []string{"string"}, + Format: "", + }, + }, + "params": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Params takes only `Parameters` of type `\"string\"` The names of the `params` must match the names of the `params` in the underlying `Task`", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"}, + } +} + func schema_pkg_apis_pipeline_v1beta1_InternalTaskModifier(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "InternalTaskModifier implements TaskModifier for resources that are built-in to Tekton Pipelines.", + Description: "InternalTaskModifier implements TaskModifier for resources that are built-in to Tekton Pipelines.\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "stepsToPrepend": { @@ -1180,11 +1222,30 @@ func schema_pkg_apis_pipeline_v1beta1_Matrix(ref common.ReferenceCallback) commo }, }, }, + "include": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Include is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix.", + Type: []string{"array"}, + Items: 
&spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.IncludeParams"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.IncludeParams", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"}, } } @@ -1283,14 +1344,14 @@ func schema_pkg_apis_pipeline_v1beta1_ParamValue(ref common.ReferenceCallback) c Description: "ResultValue is a type alias of ParamValue", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "type": { + "Type": { SchemaProps: spec.SchemaProps{ Default: "", Type: []string{"string"}, Format: "", }, }, - "stringVal": { + "StringVal": { SchemaProps: spec.SchemaProps{ Description: "Represents the stored type of ParamValues.", Default: "", @@ -1298,7 +1359,7 @@ func schema_pkg_apis_pipeline_v1beta1_ParamValue(ref common.ReferenceCallback) c Format: "", }, }, - "arrayVal": { + "ArrayVal": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", @@ -1317,7 +1378,7 @@ func schema_pkg_apis_pipeline_v1beta1_ParamValue(ref common.ReferenceCallback) c }, }, }, - "objectVal": { + "ObjectVal": { SchemaProps: spec.SchemaProps{ Type: []string{"object"}, AdditionalProperties: &spec.SchemaOrBool{ @@ -1333,7 +1394,7 @@ func schema_pkg_apis_pipeline_v1beta1_ParamValue(ref common.ReferenceCallback) c }, }, }, - Required: []string{"type", "stringVal", "arrayVal", "objectVal"}, + Required: []string{"Type", "StringVal", "ArrayVal", "ObjectVal"}, }, }, } @@ -1385,7 +1446,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineDeclaredResource(ref common.Refere return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineDeclaredResource is used by a Pipeline to declare the types of the PipelineResources that it will required to run and names which can be used to refer to these PipelineResources in PipelineTaskResourceBindings.", + Description: "PipelineDeclaredResource is used by a Pipeline to declare the types of the PipelineResources that it will required to run and names which can be used to refer to these PipelineResources in PipelineTaskResourceBindings.\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -1490,7 +1551,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRef(ref common.ReferenceCallback) }, "bundle": { SchemaProps: spec.SchemaProps{ - Description: "Bundle url reference to a Tekton Bundle. 
Deprecated: Please use ResolverRef with the bundles resolver instead.", + Description: "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead.", Type: []string{"string"}, Format: "", }, @@ -1505,7 +1566,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineResourceBinding(ref common.Referen return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineResourceBinding connects a reference to an instance of a PipelineResource with a PipelineResource dependency that the Pipeline has declared", + Description: "PipelineResourceBinding connects a reference to an instance of a PipelineResource with a PipelineResource dependency that the Pipeline has declared\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -1539,7 +1600,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineResourceRef(ref common.ReferenceCa return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineResourceRef can be used to refer to a specific instance of a Resource", + Description: "PipelineResourceRef can be used to refer to a specific instance of a Resource\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -1562,46 +1623,6 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineResourceRef(ref common.ReferenceCa } } -func schema_pkg_apis_pipeline_v1beta1_PipelineResourceResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PipelineResourceResult used to export the image name and digest as json", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "resourceName": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "type": { - SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - Required: []string{"key", "value"}, - }, - }, - } -} - func schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1845,7 +1866,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec(ref common.ReferenceCallba }, }, SchemaProps: spec.SchemaProps{ - Description: "Resources is a list of bindings specifying which actual instances of PipelineResources to use for the resources the Pipeline has declared it needs.", + Description: "Resources is a list of bindings specifying which actual instances of PipelineResources to use for the resources the Pipeline has declared it needs.\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -1897,7 +1918,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec(ref common.ReferenceCallba }, "timeout": { SchemaProps: spec.SchemaProps{ - Description: "Timeout Deprecated: use pipelineRunSpec.Timeouts.Pipeline instead Time after which the Pipeline times out. Defaults to never. 
Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Description: "Timeout is the Time after which the Pipeline times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration\n\nDeprecated: use pipelineRunSpec.Timeouts.Pipeline instead", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -2015,6 +2036,34 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunStatus(ref common.ReferenceCall Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, + "taskRuns": { + SchemaProps: spec.SchemaProps{ + Description: "TaskRuns is a map of PipelineRunTaskRunStatus with the taskRun name as the key.\n\nDeprecated: use ChildReferences instead. As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus"), + }, + }, + }, + }, + }, + "runs": { + SchemaProps: spec.SchemaProps{ + Description: "Runs is a map of PipelineRunRunStatus with the run name as the key\n\nDeprecated: use ChildReferences instead. As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus"), + }, + }, + }, + }, + }, "pipelineResults": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -2110,7 +2159,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunStatus(ref common.ReferenceCall }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"}, } } @@ -2133,6 +2182,34 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunStatusFields(ref common.Referen Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, + "taskRuns": { + SchemaProps: spec.SchemaProps{ + Description: "TaskRuns is a map of PipelineRunTaskRunStatus with the taskRun name as the key.\n\nDeprecated: use ChildReferences instead. 
As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus"), + }, + }, + }, + }, + }, + "runs": { + SchemaProps: spec.SchemaProps{ + Description: "Runs is a map of PipelineRunRunStatus with the run name as the key\n\nDeprecated: use ChildReferences instead. As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus"), + }, + }, + }, + }, + }, "pipelineResults": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -2228,7 +2305,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunStatusFields(ref common.Referen }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } @@ -2286,6 +2363,13 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineSpec(ref common.ReferenceCallback) Description: "PipelineSpec defines the desired state of Pipeline.", Type: []string{"object"}, Properties: map[string]spec.Schema{ + "displayName": { + SchemaProps: spec.SchemaProps{ + Description: "DisplayName is a user-facing name of the pipeline that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, "description": { SchemaProps: spec.SchemaProps{ Description: "Description is a user-facing description of the pipeline that may be used to populate a UI.", @@ -2300,7 +2384,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineSpec(ref common.ReferenceCallback) }, }, SchemaProps: spec.SchemaProps{ - Description: "Resources declares the names and types of the resources given to the Pipeline's tasks as inputs and outputs.", + Description: "Deprecated: Unused, preserved only for backwards compatibility", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -2429,6 +2513,20 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref common.ReferenceCallback) Format: "", }, }, + "displayName": { + SchemaProps: spec.SchemaProps{ + Description: "DisplayName is the display name of this task within the context of a Pipeline. 
This display name may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is the description of this task within the context of a Pipeline. This description may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, "taskRef": { SchemaProps: spec.SchemaProps{ Description: "TaskRef is a reference to a task definition.", @@ -2484,7 +2582,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref common.ReferenceCallback) }, "resources": { SchemaProps: spec.SchemaProps{ - Description: "Resources declares the resources given to this task as inputs and outputs.", + Description: "Deprecated: Unused, preserved only for backwards compatibility", Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources"), }, }, @@ -2550,7 +2648,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTaskInputResource(ref common.Refer return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineTaskInputResource maps the name of a declared PipelineResource input dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources that should be used. This input may come from a previous task.", + Description: "PipelineTaskInputResource maps the name of a declared PipelineResource input dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources that should be used. This input may come from a previous task.\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -2643,7 +2741,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTaskOutputResource(ref common.Refe return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineTaskOutputResource maps the name of a declared PipelineResource output dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources that should be used.", + Description: "PipelineTaskOutputResource maps the name of a declared PipelineResource output dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources that should be used.\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -2701,7 +2799,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTaskResources(ref common.Reference return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineTaskResources allows a Pipeline to declare how its DeclaredPipelineResources should be provided to a Task as its inputs and outputs.", + Description: "PipelineTaskResources allows a Pipeline to declare how its DeclaredPipelineResources should be provided to a Task as its inputs and outputs.\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "inputs": { @@ -2852,7 +2950,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineWorkspaceDeclaration(ref common.Re return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. 
Deprecated: use PipelineWorkspaceDeclaration type instead", + Description: "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding.\n\nDeprecated: use PipelineWorkspaceDeclaration type instead", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -2907,15 +3005,21 @@ func schema_pkg_apis_pipeline_v1beta1_Provenance(ref common.ReferenceCallback) c return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.). For now, it only contains the subfield `ConfigSource` that identifies the source where a build config file came from. In future, it can be expanded as needed to include more metadata about the build. This field aims to be used to carry minimum amount of the authenticated metadata in *Run status so that Tekton Chains can pick it up and record in the provenance it generates.", + Description: "Provenance contains metadata about resources used in the TaskRun/PipelineRun such as the source from where a remote build definition was fetched. This field aims to carry minimum amoumt of metadata in *Run status so that Tekton Chains can capture them in the provenance.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "configSource": { SchemaProps: spec.SchemaProps{ - Description: "ConfigSource identifies the source where a resource came from.", + Description: "Deprecated: Use RefSource instead", Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource"), }, }, + "refSource": { + SchemaProps: spec.SchemaProps{ + Description: "RefSource identifies the source where a remote task/pipeline came from.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"), + }, + }, "featureFlags": { SchemaProps: spec.SchemaProps{ Description: "FeatureFlags identifies the feature flags that were used during the task/pipeline run", @@ -2926,7 +3030,50 @@ func schema_pkg_apis_pipeline_v1beta1_Provenance(ref common.ReferenceCallback) c }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/config.FeatureFlags", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource"}, + "github.com/tektoncd/pipeline/pkg/apis/config.FeatureFlags", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"}, + } +} + +func schema_pkg_apis_pipeline_v1beta1_RefSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RefSource contains the information that can uniquely identify where a remote built definition came from i.e. Git repositories, Tekton Bundles in OCI registry and hub.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "uri": { + SchemaProps: spec.SchemaProps{ + Description: "URI indicates the identity of the source of the build definition. Example: \"https://github.com/tektoncd/catalog\"", + Type: []string{"string"}, + Format: "", + }, + }, + "digest": { + SchemaProps: spec.SchemaProps{ + Description: "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. 
Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "entryPoint": { + SchemaProps: spec.SchemaProps{ + Description: "EntryPoint identifies the entry point into the build. This is often a path to a build definition file and/or a target label within that file. Example: \"task/git-clone/0.8/git-clone.yaml\"", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, } } @@ -3487,7 +3634,7 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. }, }, SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. List of ports to expose from the Step's container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + Description: "List of ports to expose from the Step's container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -3590,38 +3737,38 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. }, "livenessProbe": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Periodic probe of container liveness. Step will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Description: "Periodic probe of container liveness. Step will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", Ref: ref("k8s.io/api/core/v1.Probe"), }, }, "readinessProbe": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Periodic probe of container service readiness. Step will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Description: "Periodic probe of container service readiness. Step will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", Ref: ref("k8s.io/api/core/v1.Probe"), }, }, "startupProbe": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. DeprecatedStartupProbe indicates that the Pod this Step runs in has successfully initialized. If specified, no other probes are executed until this completes successfully. 
If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Description: "DeprecatedStartupProbe indicates that the Pod this Step runs in has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", Ref: ref("k8s.io/api/core/v1.Probe"), }, }, "lifecycle": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.", Ref: ref("k8s.io/api/core/v1.Lifecycle"), }, }, "terminationMessagePath": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release and can't be meaningfully used.", + Description: "Deprecated: This field will be removed in a future release and can't be meaningfully used.", Type: []string{"string"}, Format: "", }, }, "terminationMessagePolicy": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release and can't be meaningfully used.", + Description: "Deprecated: This field will be removed in a future release and can't be meaningfully used.", Type: []string{"string"}, Format: "", }, @@ -3641,21 +3788,21 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. }, "stdin": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.\n\nDeprecated: This field will be removed in a future release.", Type: []string{"boolean"}, Format: "", }, }, "stdinOnce": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. 
If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false\n\nDeprecated: This field will be removed in a future release.", Type: []string{"boolean"}, Format: "", }, }, "tty": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. Default is false.", + Description: "Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. Default is false.\n\nDeprecated: This field will be removed in a future release.", Type: []string{"boolean"}, Format: "", }, @@ -3800,7 +3947,7 @@ func schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref common.ReferenceCallback) Properties: map[string]spec.Schema{ "name": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Default name for each Step specified as a DNS_LABEL. Each Step in a Task must have a unique name. Cannot be updated.", + Description: "Default name for each Step specified as a DNS_LABEL. Each Step in a Task must have a unique name. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.", Default: "", Type: []string{"string"}, Format: "", @@ -3873,7 +4020,7 @@ func schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref common.ReferenceCallback) }, }, SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. List of ports to expose from the Step's container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + Description: "List of ports to expose from the Step's container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -3976,38 +4123,38 @@ func schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref common.ReferenceCallback) }, "livenessProbe": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", Ref: ref("k8s.io/api/core/v1.Probe"), }, }, "readinessProbe": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", Ref: ref("k8s.io/api/core/v1.Probe"), }, }, "startupProbe": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. DeprecatedStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Description: "DeprecatedStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", Ref: ref("k8s.io/api/core/v1.Probe"), }, }, "lifecycle": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.", Ref: ref("k8s.io/api/core/v1.Lifecycle"), }, }, "terminationMessagePath": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release and cannot be meaningfully used.", + Description: "Deprecated: This field will be removed in a future release and cannot be meaningfully used.", Type: []string{"string"}, Format: "", }, }, "terminationMessagePolicy": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. 
This field will be removed in a future release and cannot be meaningfully used.", + Description: "Deprecated: This field will be removed in a future release and cannot be meaningfully used.", Type: []string{"string"}, Format: "", }, @@ -4027,21 +4174,21 @@ func schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref common.ReferenceCallback) }, "stdin": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Whether this Step should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the Step will always result in EOF. Default is false.", + Description: "Whether this Step should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the Step will always result in EOF. Default is false.\n\nDeprecated: This field will be removed in a future release.", Type: []string{"boolean"}, Format: "", }, }, "stdinOnce": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false\n\nDeprecated: This field will be removed in a future release.", Type: []string{"boolean"}, Format: "", }, }, "tty": { SchemaProps: spec.SchemaProps{ - Description: "Deprecated. This field will be removed in a future release. Whether this Step should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. Default is false.", + Description: "Whether this Step should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. Default is false.\n\nDeprecated: This field will be removed in a future release.", Type: []string{"boolean"}, Format: "", }, @@ -4162,21 +4309,21 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRef(ref common.ReferenceCallback) comm }, "kind": { SchemaProps: spec.SchemaProps{ - Description: "TaskKind indicates the kind of the task, namespaced or cluster scoped.", + Description: "TaskKind indicates the Kind of the Task: 1. Namespaced Task when Kind is set to \"Task\". If Kind is \"\", it defaults to \"Task\". 2. Cluster-Scoped Task when Kind is set to \"ClusterTask\" 3. 
Custom Task when Kind is non-empty and APIVersion is non-empty", Type: []string{"string"}, Format: "", }, }, "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "API version of the referent", + Description: "API version of the referent Note: A Task with non-empty APIVersion and Kind is considered a Custom Task", Type: []string{"string"}, Format: "", }, }, "bundle": { SchemaProps: spec.SchemaProps{ - Description: "Bundle url reference to a Tekton Bundle. Deprecated: Please use ResolverRef with the bundles resolver instead.", + Description: "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead.", Type: []string{"string"}, Format: "", }, @@ -4191,7 +4338,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskResource(ref common.ReferenceCallback) return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskResource defines an input or output Resource declared as a requirement by a Task. The Name field will be used to refer to these Resources within the Task definition, and when provided as an Input, the Name will be the path to the volume mounted containing this Resource as an input (e.g. an input Resource named `workspace` will be mounted at `/workspace`).", + Description: "TaskResource defines an input or output Resource declared as a requirement by a Task. The Name field will be used to refer to these Resources within the Task definition, and when provided as an Input, the Name will be the path to the volume mounted containing this Resource as an input (e.g. an input Resource named `workspace` will be mounted at `/workspace`).\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -4242,7 +4389,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskResourceBinding(ref common.ReferenceCa return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskResourceBinding points to the PipelineResource that will be used for the Task input or output called Name.", + Description: "TaskResourceBinding points to the PipelineResource that will be used for the Task input or output called Name.\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -4296,7 +4443,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskResources(ref common.ReferenceCallback return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskResources allows a Pipeline to declare how its DeclaredPipelineResources should be provided to a Task as its inputs and outputs.", + Description: "TaskResources allows a Pipeline to declare how its DeclaredPipelineResources should be provided to a Task as its inputs and outputs.\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "inputs": { @@ -4481,7 +4628,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunInputs(ref common.ReferenceCallback return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskRunInputs holds the input values that this task was invoked with.", + Description: "TaskRunInputs holds the input values that this task was invoked with.\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "resources": { @@ -4581,7 +4728,7 @@ func 
schema_pkg_apis_pipeline_v1beta1_TaskRunOutputs(ref common.ReferenceCallbac return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskRunOutputs holds the output values that this task was invoked with.", + Description: "TaskRunOutputs holds the output values that this task was invoked with.\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "resources": { @@ -4614,7 +4761,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunResources(ref common.ReferenceCallb return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskRunResources allows a TaskRun to declare inputs and outputs TaskResourceBinding", + Description: "TaskRunResources allows a TaskRun to declare inputs and outputs TaskResourceBinding\n\nDeprecated: Unused, preserved only for backwards compatibility", Type: []string{"object"}, Properties: map[string]spec.Schema{ "inputs": { @@ -4764,7 +4911,8 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunSpec(ref common.ReferenceCallback) }, "resources": { SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources"), + Description: "Deprecated: Unused, preserved only for backwards compatibility", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources"), }, }, "serviceAccountName": { @@ -4985,7 +5133,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunStatus(ref common.ReferenceCallback }, }, SchemaProps: spec.SchemaProps{ - Description: "Deprecated. CloudEvents describe the state of each cloud event requested via a CloudEventResource.", + Description: "CloudEvents describe the state of each cloud event requested via a CloudEventResource.\n\nDeprecated: Removed in v0.44.0.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -5023,13 +5171,13 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunStatus(ref common.ReferenceCallback }, }, SchemaProps: spec.SchemaProps{ - Description: "Results from Resources built during the TaskRun. currently includes the digest of build container images", + Description: "Results from Resources built during the TaskRun. 
This is tomb-stoned along with the removal of pipelineResources Deprecated: this field is not populated and is preserved only for backwards compatibility", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceResult"), + Ref: ref("github.com/tektoncd/pipeline/pkg/result.RunResult"), }, }, }, @@ -5106,7 +5254,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunStatus(ref common.ReferenceCallback }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "github.com/tektoncd/pipeline/pkg/result.RunResult", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"}, } } @@ -5163,7 +5311,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunStatusFields(ref common.ReferenceCa }, }, SchemaProps: spec.SchemaProps{ - Description: "Deprecated. CloudEvents describe the state of each cloud event requested via a CloudEventResource.", + Description: "CloudEvents describe the state of each cloud event requested via a CloudEventResource.\n\nDeprecated: Removed in v0.44.0.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -5201,13 +5349,13 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunStatusFields(ref common.ReferenceCa }, }, SchemaProps: spec.SchemaProps{ - Description: "Results from Resources built during the TaskRun. currently includes the digest of build container images", + Description: "Results from Resources built during the TaskRun. 
This is tomb-stoned along with the removal of pipelineResources Deprecated: this field is not populated and is preserved only for backwards compatibility", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceResult"), + Ref: ref("github.com/tektoncd/pipeline/pkg/result.RunResult"), }, }, }, @@ -5284,7 +5432,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunStatusFields(ref common.ReferenceCa }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "github.com/tektoncd/pipeline/pkg/result.RunResult", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } @@ -5328,7 +5476,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskSpec(ref common.ReferenceCallback) com Properties: map[string]spec.Schema{ "resources": { SchemaProps: spec.SchemaProps{ - Description: "Resources is a list input and output resource to run the task Resources are represented in TaskRuns as bindings to instances of PipelineResources.", + Description: "Resources is a list input and output resource to run the task Resources are represented in TaskRuns as bindings to instances of PipelineResources.\n\nDeprecated: Unused, preserved only for backwards compatibility", Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources"), }, }, @@ -5351,6 +5499,13 @@ func schema_pkg_apis_pipeline_v1beta1_TaskSpec(ref common.ReferenceCallback) com }, }, }, + "displayName": { + SchemaProps: spec.SchemaProps{ + Description: "DisplayName is a user-facing name of the task that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, "description": { SchemaProps: spec.SchemaProps{ Description: "Description is a user-facing description of the task that may be used to populate a UI.", @@ -5931,16 +6086,22 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatus(ref common.Refer }, "source": { SchemaProps: spec.SchemaProps{ - Description: "Source is the source reference of the remote data that records the url, digest and the entrypoint.", + Description: "Deprecated: Use RefSource instead", Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource"), }, }, + "refSource": { + SchemaProps: spec.SchemaProps{ + Description: "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", + Ref: 
ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"), + }, + }, }, - Required: []string{"data", "source"}, + Required: []string{"data", "source", "refSource"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource", "knative.dev/pkg/apis.Condition"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource", "knative.dev/pkg/apis.Condition"}, } } @@ -5961,304 +6122,21 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatusFields(ref common }, "source": { SchemaProps: spec.SchemaProps{ - Description: "Source is the source reference of the remote data that records the url, digest and the entrypoint.", + Description: "Deprecated: Use RefSource instead", Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource"), }, }, - }, - Required: []string{"data", "source"}, - }, - }, - Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource"}, - } -} - -func schema_pkg_apis_resource_v1alpha1_PipelineResource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PipelineResource describes a resource that is an input to or output from a Task.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec holds the desired state of the PipelineResource from the client", - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status is deprecated. 
It usually is used to communicate the observed state of the PipelineResource from the controller, but was unused as there is no controller for PipelineResource.", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceSpec", "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_resource_v1alpha1_PipelineResourceList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PipelineResourceList contains a list of PipelineResources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { + "refSource": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResource"), - }, - }, - }, + Description: "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"), }, }, }, - Required: []string{"items"}, + Required: []string{"data", "source", "refSource"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResource", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_resource_v1alpha1_PipelineResourceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PipelineResourceSpec defines an individual resources used in the pipeline.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "description": { - SchemaProps: spec.SchemaProps{ - Description: "Description is a user-facing description of the resource that may be used to populate a UI.", - Type: []string{"string"}, - Format: "", - }, - }, - "type": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "params": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - 
Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceParam"), - }, - }, - }, - }, - }, - "secrets": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Secrets to fetch to populate some of resource fields", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.SecretParam"), - }, - }, - }, - }, - }, - }, - Required: []string{"type", "params"}, - }, - }, - Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceParam", "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.SecretParam"}, - } -} - -func schema_pkg_apis_resource_v1alpha1_PipelineResourceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PipelineResourceStatus does not contain anything because PipelineResources on their own do not have a status Deprecated", - Type: []string{"object"}, - }, - }, - } -} - -func schema_pkg_apis_resource_v1alpha1_ResourceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourceDeclaration defines an input or output PipelineResource declared as a requirement by another type such as a Task or Condition. The Name field will be used to refer to these PipelineResources within the type's definition, and when provided as an Input, the Name will be the path to the volume mounted containing this PipelineResource as an input (e.g. an input Resource named `workspace` will be mounted at `/workspace`).", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name declares the name by which a resource is referenced in the definition. Resources may be referenced by name in the definition of a Task's steps.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type is the type of this resource;", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "description": { - SchemaProps: spec.SchemaProps{ - Description: "Description is a user-facing description of the declared resource that may be used to populate a UI.", - Type: []string{"string"}, - Format: "", - }, - }, - "targetPath": { - SchemaProps: spec.SchemaProps{ - Description: "TargetPath is the path in workspace directory where the resource will be copied.", - Type: []string{"string"}, - Format: "", - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Optional declares the resource as optional. By default optional is set to false which makes a resource required. 
optional: true - the resource is considered optional optional: false - the resource is considered required (equivalent of not specifying it)", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"name", "type"}, - }, - }, - } -} - -func schema_pkg_apis_resource_v1alpha1_ResourceParam(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourceParam declares a string value to use for the parameter called Name, and is used in the specific context of PipelineResources.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "value"}, - }, - }, - } -} - -func schema_pkg_apis_resource_v1alpha1_SecretParam(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SecretParam indicates which secret can be used to populate a field of the resource", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "fieldName": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "secretKey": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "secretName": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"fieldName", "secretKey", "secretName"}, - }, - }, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"}, } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go index 443c94ddde..18de6bd71d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package v1beta1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go index 959d6b7ca7..a2d7e78f3d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go @@ -22,8 +22,8 @@ import ( "fmt" "strings" - resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" "github.com/tektoncd/pipeline/pkg/substitution" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" ) @@ -55,6 +55,9 @@ type ParamSpec struct { Default *ParamValue `json:"default,omitempty"` } +// ParamSpecs is a list of ParamSpec +type ParamSpecs []ParamSpec + // PropertySpec defines the struct for object keys type PropertySpec struct { Type ParamType `json:"type,omitempty"` @@ -100,16 +103,253 @@ func (pp *ParamSpec) setDefaultsForProperties() { } } -// ResourceParam declares a string value to use for the parameter called Name, and is used in -// the specific context of PipelineResources. -type ResourceParam = resource.ResourceParam - // Param declares an ParamValues to use for the parameter called name. type Param struct { Name string `json:"name"` Value ParamValue `json:"value"` } +// Params is a list of Param +type Params []Param + +// ExtractNames returns a set of unique names +func (ps Params) ExtractNames() sets.String { + names := sets.String{} + for _, p := range ps { + names.Insert(p.Name) + } + return names +} + +func (ps Params) extractValues() []string { + pvs := []string{} + for i := range ps { + pvs = append(pvs, ps[i].Value.StringVal) + pvs = append(pvs, ps[i].Value.ArrayVal...) + for _, v := range ps[i].Value.ObjectVal { + pvs = append(pvs, v) + } + } + return pvs +} + +// extractParamMapArrVals creates a param map with the key: param.Name and +// val: param.Value.ArrayVal +func (ps Params) extractParamMapArrVals() map[string][]string { + paramsMap := make(map[string][]string) + for _, p := range ps { + paramsMap[p.Name] = p.Value.ArrayVal + } + return paramsMap +} + +// extractParamArrayLengths extract and return the lengths of all array params +// Example of returned value: {"a-array-params": 2,"b-array-params": 2 } +func (ps Params) extractParamArrayLengths() map[string]int { + // Collect all array params + arrayParamsLengths := make(map[string]int) + + // Collect array params lengths from params + for _, p := range ps { + if p.Value.Type == ParamTypeArray { + arrayParamsLengths[p.Name] = len(p.Value.ArrayVal) + } + } + return arrayParamsLengths +} + +// validateDuplicateParameters checks if a parameter with the same name is defined more than once +func (ps Params) validateDuplicateParameters() (errs *apis.FieldError) { + taskParamNames := sets.NewString() + for i, param := range ps { + if taskParamNames.Has(param.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("parameter names must be unique,"+ + " the parameter \"%s\" is also defined at", param.Name), fmt.Sprintf("[%d].name", i))) + } + taskParamNames.Insert(param.Name) + } + return errs +} + +// extractParamArrayLengths extract and return the lengths of all array params +// Example of returned value: {"a-array-params": 2,"b-array-params": 2 } +func (ps ParamSpecs) extractParamArrayLengths() map[string]int { + // Collect all array params + arrayParamsLengths := make(map[string]int) + + // Collect array params lengths from defaults + for _, p := range ps { + if p.Default 
!= nil { + if p.Default.Type == ParamTypeArray { + arrayParamsLengths[p.Name] = len(p.Default.ArrayVal) + } + } + } + return arrayParamsLengths +} + +// validateOutofBoundArrayParams validates if the array indexing params are out of bound +// example of arrayIndexingParams: ["$(params.a-array-param[1])", "$(params.b-array-param[2])"] +// example of arrayParamsLengths: {"a-array-params": 2,"b-array-params": 2 } +func validateOutofBoundArrayParams(arrayIndexingParams []string, arrayParamsLengths map[string]int) error { + outofBoundParams := sets.String{} + for _, val := range arrayIndexingParams { + indexString := substitution.ExtractIndexString(val) + idx, _ := substitution.ExtractIndex(indexString) + // this will extract the param name from reference + // e.g. $(params.a-array-param[1]) -> a-array-param + paramName, _, _ := substitution.ExtractVariablesFromString(substitution.TrimArrayIndex(val), "params") + + if paramLength, ok := arrayParamsLengths[paramName[0]]; ok { + if idx >= paramLength { + outofBoundParams.Insert(val) + } + } + } + if outofBoundParams.Len() > 0 { + return fmt.Errorf("non-existent param references:%v", outofBoundParams.List()) + } + return nil +} + +// extractArrayIndexingParamRefs takes a string of the form `foo-$(params.array-param[1])-bar` and extracts the portions of the string that reference an element in an array param. +// For example, for the string “foo-$(params.array-param[1])-bar-$(params.other-array-param[2])-$(params.string-param)`, +// it would return ["$(params.array-param[1])", "$(params.other-array-param[2])"]. +func extractArrayIndexingParamRefs(paramReference string) []string { + l := []string{} + list := substitution.ExtractParamsExpressions(paramReference) + for _, val := range list { + indexString := substitution.ExtractIndexString(val) + if indexString != "" { + l = append(l, val) + } + } + return l +} + +// extractParamRefsFromSteps get all array indexing references from steps +func extractParamRefsFromSteps(steps []Step) []string { + paramsRefs := []string{} + for _, step := range steps { + paramsRefs = append(paramsRefs, step.Script) + container := step.ToK8sContainer() + paramsRefs = append(paramsRefs, extractParamRefsFromContainer(container)...) + } + return paramsRefs +} + +// extractParamRefsFromStepTemplate get all array indexing references from StepsTemplate +func extractParamRefsFromStepTemplate(stepTemplate *StepTemplate) []string { + if stepTemplate == nil { + return nil + } + container := stepTemplate.ToK8sContainer() + return extractParamRefsFromContainer(container) +} + +// extractParamRefsFromSidecars get all array indexing references from sidecars +func extractParamRefsFromSidecars(sidecars []Sidecar) []string { + paramsRefs := []string{} + for _, s := range sidecars { + paramsRefs = append(paramsRefs, s.Script) + container := s.ToK8sContainer() + paramsRefs = append(paramsRefs, extractParamRefsFromContainer(container)...) 
+ } + return paramsRefs +} + +// extractParamRefsFromVolumes get all array indexing references from volumes +func extractParamRefsFromVolumes(volumes []corev1.Volume) []string { + paramsRefs := []string{} + for i, v := range volumes { + paramsRefs = append(paramsRefs, v.Name) + if v.VolumeSource.ConfigMap != nil { + paramsRefs = append(paramsRefs, v.ConfigMap.Name) + for _, item := range v.ConfigMap.Items { + paramsRefs = append(paramsRefs, item.Key) + paramsRefs = append(paramsRefs, item.Path) + } + } + if v.VolumeSource.Secret != nil { + paramsRefs = append(paramsRefs, v.Secret.SecretName) + for _, item := range v.Secret.Items { + paramsRefs = append(paramsRefs, item.Key) + paramsRefs = append(paramsRefs, item.Path) + } + } + if v.PersistentVolumeClaim != nil { + paramsRefs = append(paramsRefs, v.PersistentVolumeClaim.ClaimName) + } + if v.Projected != nil { + for _, s := range volumes[i].Projected.Sources { + if s.ConfigMap != nil { + paramsRefs = append(paramsRefs, s.ConfigMap.Name) + } + if s.Secret != nil { + paramsRefs = append(paramsRefs, s.Secret.Name) + } + if s.ServiceAccountToken != nil { + paramsRefs = append(paramsRefs, s.ServiceAccountToken.Audience) + } + } + } + if v.CSI != nil { + if v.CSI.NodePublishSecretRef != nil { + paramsRefs = append(paramsRefs, v.CSI.NodePublishSecretRef.Name) + } + if v.CSI.VolumeAttributes != nil { + for _, value := range v.CSI.VolumeAttributes { + paramsRefs = append(paramsRefs, value) + } + } + } + } + return paramsRefs +} + +// extractParamRefsFromContainer get all array indexing references from container +func extractParamRefsFromContainer(c *corev1.Container) []string { + paramsRefs := []string{} + paramsRefs = append(paramsRefs, c.Name) + paramsRefs = append(paramsRefs, c.Image) + paramsRefs = append(paramsRefs, string(c.ImagePullPolicy)) + paramsRefs = append(paramsRefs, c.Args...) + + for ie, e := range c.Env { + paramsRefs = append(paramsRefs, e.Value) + if c.Env[ie].ValueFrom != nil { + if e.ValueFrom.SecretKeyRef != nil { + paramsRefs = append(paramsRefs, e.ValueFrom.SecretKeyRef.LocalObjectReference.Name) + paramsRefs = append(paramsRefs, e.ValueFrom.SecretKeyRef.Key) + } + if e.ValueFrom.ConfigMapKeyRef != nil { + paramsRefs = append(paramsRefs, e.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name) + paramsRefs = append(paramsRefs, e.ValueFrom.ConfigMapKeyRef.Key) + } + } + } + + for _, e := range c.EnvFrom { + paramsRefs = append(paramsRefs, e.Prefix) + if e.ConfigMapRef != nil { + paramsRefs = append(paramsRefs, e.ConfigMapRef.LocalObjectReference.Name) + } + if e.SecretRef != nil { + paramsRefs = append(paramsRefs, e.SecretRef.LocalObjectReference.Name) + } + } + + paramsRefs = append(paramsRefs, c.WorkingDir) + paramsRefs = append(paramsRefs, c.Command...) + + for _, v := range c.VolumeMounts { + paramsRefs = append(paramsRefs, v.Name) + paramsRefs = append(paramsRefs, v.MountPath) + paramsRefs = append(paramsRefs, v.SubPath) + } + return paramsRefs +} + // ParamType indicates the type of an input parameter; // Used to distinguish between a single string and an array of strings. type ParamType string @@ -130,14 +370,16 @@ var AllParamTypes = []ParamType{ParamTypeString, ParamTypeArray, ParamTypeObject // Used in JSON unmarshalling so that a single JSON field can accept // either an individual string or an array of strings. type ParamValue struct { - Type ParamType `json:"type"` // Represents the stored type of ParamValues. - StringVal string `json:"stringVal"` + Type ParamType // Represents the stored type of ParamValues. 
+ StringVal string // +listType=atomic - ArrayVal []string `json:"arrayVal"` - ObjectVal map[string]string `json:"objectVal"` + ArrayVal []string + ObjectVal map[string]string } // ArrayOrString is deprecated, this is to keep backward compatibility +// +// Deprecated: Use ParamValue instead. type ArrayOrString = ParamValue // UnmarshalJSON implements the json.Unmarshaller interface. @@ -210,6 +452,8 @@ func (paramValues *ParamValue) ApplyReplacements(stringReplacements map[string]s newObjectVal[k] = substitution.ApplyReplacements(v, stringReplacements) } paramValues.ObjectVal = newObjectVal + case ParamTypeString: + fallthrough default: paramValues.applyOrCorrect(stringReplacements, arrayReplacements, objectReplacements) } @@ -285,12 +529,9 @@ func ArrayReference(a string) string { // validatePipelineParametersVariablesInTaskParameters validates param value that // may contain the reference(s) to other params to make sure those references are used appropriately. -func validatePipelineParametersVariablesInTaskParameters(params []Param, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { - taskParamNames := sets.NewString() - for i, param := range params { - if taskParamNames.Has(param.Name) { - errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("params names must be unique, the same param: %s is defined multiple times at", param.Name), fmt.Sprintf("params[%d].name", i))) - } +func validatePipelineParametersVariablesInTaskParameters(params Params, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { + errs = errs.Also(params.validateDuplicateParameters()).ViaField("params") + for _, param := range params { switch param.Value.Type { case ParamTypeArray: for idx, arrayElement := range param.Value.ArrayVal { @@ -300,47 +541,11 @@ func validatePipelineParametersVariablesInTaskParameters(params []Param, prefix for key, val := range param.Value.ObjectVal { errs = errs.Also(validateStringVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldKey("properties", key).ViaFieldKey("params", param.Name)) } + case ParamTypeString: + fallthrough default: errs = errs.Also(validateParamStringValue(param, prefix, paramNames, arrayParamNames, objectParamNameKeys)) } - taskParamNames.Insert(param.Name) - } - return errs -} - -// validatePipelineParametersVariablesInMatrixParameters validates matrix param value -// that may contain the reference(s) to other params to make sure those references are used appropriately. 
-func validatePipelineParametersVariablesInMatrixParameters(matrix []Param, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { - for _, param := range matrix { - for idx, arrayElement := range param.Value.ArrayVal { - errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", idx).ViaFieldKey("matrix", param.Name)) - } - } - return errs -} - -func validateParametersInTaskMatrix(matrix *Matrix) (errs *apis.FieldError) { - if matrix != nil { - for _, param := range matrix.Params { - if param.Value.Type != ParamTypeArray { - errs = errs.Also(apis.ErrInvalidValue("parameters of type array only are allowed in matrix", "").ViaFieldKey("matrix", param.Name)) - } - } - } - return errs -} - -func validateParameterInOneOfMatrixOrParams(matrix *Matrix, params []Param) (errs *apis.FieldError) { - matrixParameterNames := sets.NewString() - if matrix != nil { - for _, param := range matrix.Params { - matrixParameterNames.Insert(param.Name) - } - } - for _, param := range params { - if matrixParameterNames.Has(param.Name) { - errs = errs.Also(apis.ErrMultipleOneOf("matrix["+param.Name+"]", "params["+param.Name+"]")) - } } return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go index 992d738d14..0c66a3bd13 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go @@ -21,8 +21,6 @@ import ( "fmt" v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" - "github.com/tektoncd/pipeline/pkg/apis/version" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" ) @@ -36,9 +34,6 @@ func (p *Pipeline) ConvertTo(ctx context.Context, to apis.Convertible) error { switch sink := to.(type) { case *v1.Pipeline: sink.ObjectMeta = p.ObjectMeta - if err := serializePipelineResources(&sink.ObjectMeta, &p.Spec); err != nil { - return err - } return p.Spec.ConvertTo(ctx, &sink.Spec) default: return fmt.Errorf("unknown version, got: %T", sink) @@ -47,6 +42,7 @@ func (p *Pipeline) ConvertTo(ctx context.Context, to apis.Convertible) error { // ConvertTo implements apis.Convertible func (ps *PipelineSpec) ConvertTo(ctx context.Context, sink *v1.PipelineSpec) error { + sink.DisplayName = ps.DisplayName sink.Description = ps.Description sink.Tasks = nil for _, t := range ps.Tasks { @@ -92,9 +88,6 @@ func (p *Pipeline) ConvertFrom(ctx context.Context, from apis.Convertible) error switch source := from.(type) { case *v1.Pipeline: p.ObjectMeta = source.ObjectMeta - if err := deserializePipelineResources(&p.ObjectMeta, &p.Spec); err != nil { - return err - } return p.Spec.ConvertFrom(ctx, &source.Spec) default: return fmt.Errorf("unknown version, got: %T", p) @@ -103,6 +96,7 @@ func (p *Pipeline) ConvertFrom(ctx context.Context, from apis.Convertible) error // ConvertFrom implements apis.Convertible func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source *v1.PipelineSpec) error { + ps.DisplayName = source.DisplayName ps.Description = source.Description ps.Tasks = nil for _, t := range source.Tasks { @@ -145,6 +139,8 @@ func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source *v1.PipelineSpec func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask) error { 
sink.Name = pt.Name + sink.DisplayName = pt.DisplayName + sink.Description = pt.Description if pt.TaskRef != nil { sink.TaskRef = &v1.TaskRef{} pt.TaskRef.convertTo(ctx, sink.TaskRef) @@ -189,6 +185,8 @@ func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask) err func (pt *PipelineTask) convertFrom(ctx context.Context, source v1.PipelineTask) error { pt.Name = source.Name + pt.DisplayName = source.DisplayName + pt.Description = source.Description if source.TaskRef != nil { newTaskRef := TaskRef{} newTaskRef.convertFrom(ctx, *source.TaskRef) @@ -267,6 +265,14 @@ func (m *Matrix) convertTo(ctx context.Context, sink *v1.Matrix) { param.convertTo(ctx, &new) sink.Params = append(sink.Params, new) } + for i, include := range m.Include { + sink.Include = append(sink.Include, v1.IncludeParams{Name: include.Name}) + for _, param := range include.Params { + newIncludeParam := v1.Param{} + param.convertTo(ctx, &newIncludeParam) + sink.Include[i].Params = append(sink.Include[i].Params, newIncludeParam) + } + } } func (m *Matrix) convertFrom(ctx context.Context, source v1.Matrix) { @@ -275,6 +281,15 @@ func (m *Matrix) convertFrom(ctx context.Context, source v1.Matrix) { new.convertFrom(ctx, param) m.Params = append(m.Params, new) } + + for i, include := range source.Include { + m.Include = append(m.Include, IncludeParams{Name: include.Name}) + for _, p := range include.Params { + new := Param{} + new.convertFrom(ctx, p) + m.Include[i].Params = append(m.Include[i].Params, new) + } + } } func (pr PipelineResult) convertTo(ctx context.Context, sink *v1.PipelineResult) { @@ -304,22 +319,3 @@ func (ptm *PipelineTaskMetadata) convertFrom(ctx context.Context, source v1.Pipe ptm.Labels = source.Labels ptm.Annotations = source.Labels } - -func serializePipelineResources(meta *metav1.ObjectMeta, spec *PipelineSpec) error { - if spec.Resources == nil { - return nil - } - return version.SerializeToMetadata(meta, spec.Resources, resourcesAnnotationKey) -} - -func deserializePipelineResources(meta *metav1.ObjectMeta, spec *PipelineSpec) error { - resources := &[]PipelineDeclaredResource{} - err := version.DeserializeFromMetadata(meta, resources, resourcesAnnotationKey) - if err != nil { - return err - } - if len(*resources) != 0 { - spec.Resources = *resources - } - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_defaults.go index a7463ae53e..ec28f038e9 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_defaults.go @@ -19,6 +19,7 @@ package v1beta1 import ( "context" + "github.com/tektoncd/pipeline/pkg/apis/config" "knative.dev/pkg/apis" ) @@ -36,25 +37,27 @@ func (ps *PipelineSpec) SetDefaults(ctx context.Context) { } for _, pt := range ps.Tasks { - if pt.TaskRef != nil { - if pt.TaskRef.Kind == "" { - pt.TaskRef.Kind = NamespacedTaskKind - } - } - if pt.TaskSpec != nil { - pt.TaskSpec.SetDefaults(ctx) - } + pt.SetDefaults(ctx) } for _, ft := range ps.Finally { ctx := ctx // Ensure local scoping per Task - if ft.TaskRef != nil { - if ft.TaskRef.Kind == "" { - ft.TaskRef.Kind = NamespacedTaskKind - } + ft.SetDefaults(ctx) + } +} + +// SetDefaults sets default values for a PipelineTask +func (pt *PipelineTask) SetDefaults(ctx context.Context) { + cfg := config.FromContextOrDefaults(ctx) + if pt.TaskRef != nil { + if pt.TaskRef.Kind == "" { + 
pt.TaskRef.Kind = NamespacedTaskKind } - if ft.TaskSpec != nil { - ft.TaskSpec.SetDefaults(ctx) + if pt.TaskRef.Name == "" && pt.TaskRef.Resolver == "" { + pt.TaskRef.Resolver = ResolverName(cfg.Defaults.DefaultResolverType) } } + if pt.TaskSpec != nil { + pt.TaskSpec.SetDefaults(ctx) + } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_interface.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_interface.go index fb21e16daf..58768ceea4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_interface.go @@ -21,7 +21,7 @@ import ( "knative.dev/pkg/apis" ) -// PipelineObject is implemented by Pipeline and ClusterPipeline +// PipelineObject is implemented by Pipeline type PipelineObject interface { apis.Defaultable PipelineMetadata() metav1.ObjectMeta diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go index c84d856090..efb6607dfc 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go @@ -17,22 +17,12 @@ limitations under the License. package v1beta1 import ( - "context" - "fmt" - "strings" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" - "github.com/tektoncd/pipeline/pkg/apis/version" - "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "knative.dev/pkg/apis" "knative.dev/pkg/kmeta" ) @@ -87,12 +77,15 @@ func (*Pipeline) GetGroupVersionKind() schema.GroupVersionKind { // PipelineSpec defines the desired state of Pipeline. type PipelineSpec struct { + // DisplayName is a user-facing name of the pipeline that may be + // used to populate a UI. + // +optional + DisplayName string `json:"displayName,omitempty"` // Description is a user-facing description of the pipeline that may be // used to populate a UI. // +optional Description string `json:"description,omitempty"` - // Resources declares the names and types of the resources given to the - // Pipeline's tasks as inputs and outputs. + // Deprecated: Unused, preserved only for backwards compatibility // +listType=atomic Resources []PipelineDeclaredResource `json:"resources,omitempty"` // Tasks declares the graph of Tasks that execute when this Pipeline is run. @@ -101,7 +94,7 @@ type PipelineSpec struct { // Params declares a list of input parameters that must be supplied when // this Pipeline is run. // +listType=atomic - Params []ParamSpec `json:"params,omitempty"` + Params ParamSpecs `json:"params,omitempty"` // Workspaces declares a set of named workspaces that are expected to be // provided by a PipelineRun. 
// +optional @@ -162,16 +155,6 @@ type EmbeddedTask struct { TaskSpec `json:",inline,omitempty"` } -// Matrix is used to fan out Tasks in a Pipeline -type Matrix struct { - // Params is a list of parameters used to fan out the pipelineTask - // Params takes only `Parameters` of type `"array"` - // Each array element is supplied to the `PipelineTask` by substituting `params` of type `"string"` in the underlying `Task`. - // The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting. - // +listType=atomic - Params []Param `json:"params,omitempty"` -} - // PipelineTask defines a task in a Pipeline, passing inputs from both // Params and from the output of previous tasks. type PipelineTask struct { @@ -180,6 +163,16 @@ type PipelineTask struct { // the execution order of tasks relative to one another. Name string `json:"name,omitempty"` + // DisplayName is the display name of this task within the context of a Pipeline. + // This display name may be used to populate a UI. + // +optional + DisplayName string `json:"displayName,omitempty"` + + // Description is the description of this task within the context of a Pipeline. + // This description may be used to populate a UI. + // +optional + Description string `json:"description,omitempty"` + // TaskRef is a reference to a task definition. // +optional TaskRef *TaskRef `json:"taskRef,omitempty"` @@ -202,15 +195,14 @@ type PipelineTask struct { // +listType=atomic RunAfter []string `json:"runAfter,omitempty"` - // Resources declares the resources given to this task as inputs and - // outputs. + // Deprecated: Unused, preserved only for backwards compatibility // +optional Resources *PipelineTaskResources `json:"resources,omitempty"` // Parameters declares parameters passed to this task. // +optional // +listType=atomic - Params []Param `json:"params,omitempty"` + Params Params `json:"params,omitempty"` // Matrix declares parameters used to fan out this task. // +optional @@ -229,204 +221,16 @@ type PipelineTask struct { Timeout *metav1.Duration `json:"timeout,omitempty"` } -// validateRefOrSpec validates at least one of taskRef or taskSpec is specified -func (pt PipelineTask) validateRefOrSpec() (errs *apis.FieldError) { - // can't have both taskRef and taskSpec at the same time - if pt.TaskRef != nil && pt.TaskSpec != nil { - errs = errs.Also(apis.ErrMultipleOneOf("taskRef", "taskSpec")) - } - // Check that one of TaskRef and TaskSpec is present - if pt.TaskRef == nil && pt.TaskSpec == nil { - errs = errs.Also(apis.ErrMissingOneOf("taskRef", "taskSpec")) - } - return errs -} - -// validateCustomTask validates custom task specifications - checking kind and fail if not yet supported features specified -func (pt PipelineTask) validateCustomTask() (errs *apis.FieldError) { - if pt.TaskRef != nil && pt.TaskRef.Kind == "" { - errs = errs.Also(apis.ErrInvalidValue("custom task ref must specify kind", "taskRef.kind")) - } - if pt.TaskSpec != nil && pt.TaskSpec.Kind == "" { - errs = errs.Also(apis.ErrInvalidValue("custom task spec must specify kind", "taskSpec.kind")) - } - if pt.TaskRef != nil && pt.TaskRef.APIVersion == "" { - errs = errs.Also(apis.ErrInvalidValue("custom task ref must specify apiVersion", "taskRef.apiVersion")) - } - if pt.TaskSpec != nil && pt.TaskSpec.APIVersion == "" { - errs = errs.Also(apis.ErrInvalidValue("custom task spec must specify apiVersion", "taskSpec.apiVersion")) - } - - // TODO(#3133): Support these features if possible. 
- if pt.Resources != nil { - errs = errs.Also(apis.ErrInvalidValue("custom tasks do not support PipelineResources", "resources")) - } - return errs -} - -// validateBundle validates bundle specifications - checking name and bundle -func (pt PipelineTask) validateBundle() (errs *apis.FieldError) { - // bundle requires a TaskRef to be specified - if (pt.TaskRef != nil && pt.TaskRef.Bundle != "") && pt.TaskRef.Name == "" { - errs = errs.Also(apis.ErrMissingField("taskRef.name")) - } - // If a bundle url is specified, ensure it is parsable - if pt.TaskRef != nil && pt.TaskRef.Bundle != "" { - if _, err := name.ParseReference(pt.TaskRef.Bundle); err != nil { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("invalid bundle reference (%s)", err.Error()), "taskRef.bundle")) - } - } - return errs -} - -// validateTask validates a pipeline task or a final task for taskRef and taskSpec -func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) - // Validate TaskSpec if it's present - if pt.TaskSpec != nil { - errs = errs.Also(pt.TaskSpec.Validate(ctx).ViaField("taskSpec")) - } - if pt.TaskRef != nil { - if pt.TaskRef.Name != "" { - // TaskRef name must be a valid k8s name - if errSlice := validation.IsQualifiedName(pt.TaskRef.Name); len(errSlice) != 0 { - errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "taskRef.name")) - } - } else if pt.TaskRef.Resolver == "" { - errs = errs.Also(apis.ErrInvalidValue("taskRef must specify name", "taskRef.name")) - } - // fail if bundle is present when EnableTektonOCIBundles feature flag is off (as it won't be allowed nor used) - if !cfg.FeatureFlags.EnableTektonOCIBundles && pt.TaskRef.Bundle != "" { - errs = errs.Also(apis.ErrDisallowedFields("taskRef.bundle")) - } - } - return errs +// IsCustomTask checks whether an embedded TaskSpec is a Custom Task +func (et *EmbeddedTask) IsCustomTask() bool { + // Note that if `apiVersion` is set to `"tekton.dev/v1beta1"` and `kind` is set to `"Task"`, + // the reference will be considered a Custom Task - https://github.com/tektoncd/pipeline/issues/6457 + return et != nil && et.APIVersion != "" && et.Kind != "" } // IsMatrixed return whether pipeline task is matrixed func (pt *PipelineTask) IsMatrixed() bool { - return pt.Matrix != nil && len(pt.Matrix.Params) > 0 -} - -func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldError) { - if pt.IsMatrixed() { - // This is an alpha feature and will fail validation if it's used in a pipeline spec - // when the enable-api-fields feature gate is anything but "alpha". 
- errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) - errs = errs.Also(pt.validateMatrixCombinationsCount(ctx)) - } - errs = errs.Also(validateParameterInOneOfMatrixOrParams(pt.Matrix, pt.Params)) - errs = errs.Also(validateParametersInTaskMatrix(pt.Matrix)) - return errs -} - -func (pt *PipelineTask) validateMatrixCombinationsCount(ctx context.Context) (errs *apis.FieldError) { - matrixCombinationsCount := pt.GetMatrixCombinationsCount() - maxMatrixCombinationsCount := config.FromContextOrDefaults(ctx).Defaults.DefaultMaxMatrixCombinationsCount - if matrixCombinationsCount > maxMatrixCombinationsCount { - errs = errs.Also(apis.ErrOutOfBoundsValue(matrixCombinationsCount, 0, maxMatrixCombinationsCount, "matrix")) - } - return errs -} - -func (pt PipelineTask) validateEmbeddedOrType() (errs *apis.FieldError) { - // Reject cases where APIVersion and/or Kind are specified alongside an embedded Task. - // We determine if this is an embedded Task by checking of TaskSpec.TaskSpec.Steps has items. - if pt.TaskSpec != nil && len(pt.TaskSpec.TaskSpec.Steps) > 0 { - if pt.TaskSpec.APIVersion != "" { - errs = errs.Also(&apis.FieldError{ - Message: "taskSpec.apiVersion cannot be specified when using taskSpec.steps", - Paths: []string{"taskSpec.apiVersion"}, - }) - } - if pt.TaskSpec.Kind != "" { - errs = errs.Also(&apis.FieldError{ - Message: "taskSpec.kind cannot be specified when using taskSpec.steps", - Paths: []string{"taskSpec.kind"}, - }) - } - } - return -} - -// GetMatrixCombinationsCount returns the count of combinations of Parameters generated from the Matrix in PipelineTask. -func (pt *PipelineTask) GetMatrixCombinationsCount() int { - if !pt.IsMatrixed() { - return 0 - } - count := 1 - for _, param := range pt.Matrix.Params { - count *= len(param.Value.ArrayVal) - } - return count -} - -func (pt *PipelineTask) validateResultsFromMatrixedPipelineTasksNotConsumed(matrixedPipelineTasks sets.String) (errs *apis.FieldError) { - for _, ref := range PipelineTaskResultRefs(pt) { - if matrixedPipelineTasks.Has(ref.PipelineTask) { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("consuming results from matrixed task %s is not allowed", ref.PipelineTask), "")) - } - } - return errs -} - -func (pt *PipelineTask) validateExecutionStatusVariablesDisallowed() (errs *apis.FieldError) { - for _, param := range pt.Params { - if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok { - errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, "value"). - ViaFieldKey("params", param.Name)) - } - } - for i, we := range pt.WhenExpressions { - if expressions, ok := we.GetVarSubstitutionExpressions(); ok { - errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, ""). - ViaFieldIndex("when", i)) - } - } - return errs -} - -func (pt *PipelineTask) validateExecutionStatusVariablesAllowed(ptNames sets.String) (errs *apis.FieldError) { - for _, param := range pt.Params { - if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok { - errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, "value"). - ViaFieldKey("params", param.Name)) - } - } - for i, we := range pt.WhenExpressions { - if expressions, ok := we.GetVarSubstitutionExpressions(); ok { - errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, ""). 
- ViaFieldIndex("when", i)) - } - } - return errs -} - -func (pt *PipelineTask) validateWorkspaces(workspaceNames sets.String) (errs *apis.FieldError) { - workspaceBindingNames := sets.NewString() - for i, ws := range pt.Workspaces { - if workspaceBindingNames.Has(ws.Name) { - errs = errs.Also(apis.ErrGeneric( - fmt.Sprintf("workspace name %q must be unique", ws.Name), "").ViaFieldIndex("workspaces", i)) - } - - if ws.Workspace == "" { - if !workspaceNames.Has(ws.Name) { - errs = errs.Also(apis.ErrInvalidValue( - fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Name), - "", - ).ViaFieldIndex("workspaces", i)) - } - } else if !workspaceNames.Has(ws.Workspace) { - errs = errs.Also(apis.ErrInvalidValue( - fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Workspace), - "", - ).ViaFieldIndex("workspaces", i)) - } - - workspaceBindingNames.Insert(ws.Name) - } - return errs + return pt.Matrix.HasParams() || pt.Matrix.HasInclude() } // TaskSpecMetadata returns the metadata of the PipelineTask's EmbeddedTask spec. @@ -439,56 +243,11 @@ func (pt PipelineTask) HashKey() string { return pt.Name } -// ValidateName checks whether the PipelineTask's name is a valid DNS label -func (pt PipelineTask) ValidateName() *apis.FieldError { - if err := validation.IsDNS1123Label(pt.Name); len(err) > 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("invalid value %q", pt.Name), - Paths: []string{"name"}, - Details: "Pipeline Task name must be a valid DNS Label." + - "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - } - } - return nil -} - -// Validate classifies whether a task is a custom task, bundle, or a regular task(dag/final) -// calls the validation routine based on the type of the task -func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { - errs = errs.Also(pt.validateRefOrSpec()) - - errs = errs.Also(pt.validateEmbeddedOrType()) - - cfg := config.FromContextOrDefaults(ctx) - // Pipeline task having taskRef/taskSpec with APIVersion is classified as custom task - switch { - case pt.TaskRef != nil && pt.TaskRef.APIVersion != "": - errs = errs.Also(pt.validateCustomTask()) - case pt.TaskSpec != nil && pt.TaskSpec.APIVersion != "": - errs = errs.Also(pt.validateCustomTask()) - // If EnableTektonOCIBundles feature flag is on, validate bundle specifications - case cfg.FeatureFlags.EnableTektonOCIBundles && pt.TaskRef != nil && pt.TaskRef.Bundle != "": - errs = errs.Also(pt.validateBundle()) - default: - errs = errs.Also(pt.validateTask(ctx)) - } - return -} - // Deps returns all other PipelineTask dependencies of this PipelineTask, based on resource usage or ordering func (pt PipelineTask) Deps() []string { // hold the list of dependencies in a set to avoid duplicates deps := sets.NewString() - // add any new dependents from a resource/workspace - if pt.Resources != nil { - for _, rd := range pt.Resources.Inputs { - for _, f := range rd.From { - deps.Insert(f) - } - } - } - // add any new dependents from result references - resource dependency for _, ref := range PipelineTaskResultRefs(&pt) { deps.Insert(ref.PipelineTask) @@ -537,83 +296,12 @@ func (l PipelineTaskList) Names() sets.String { return names } -// Validate a list of pipeline tasks including custom task and bundles -func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, path string) (errs *apis.FieldError) { - for i, t := range 
l { - // validate pipeline task name - errs = errs.Also(t.ValidateName().ViaFieldIndex(path, i)) - // names cannot be duplicated - checking that pipelineTask names are unique - if _, ok := taskNames[t.Name]; ok { - errs = errs.Also(apis.ErrMultipleOneOf("name").ViaFieldIndex(path, i)) - } - taskNames.Insert(t.Name) - // validate custom task, bundle, dag, or final task - errs = errs.Also(t.Validate(ctx).ViaFieldIndex(path, i)) - } - return errs -} - // PipelineTaskParam is used to provide arbitrary string parameters to a Task. type PipelineTaskParam struct { Name string `json:"name"` Value string `json:"value"` } -// PipelineDeclaredResource is used by a Pipeline to declare the types of the -// PipelineResources that it will required to run and names which can be used to -// refer to these PipelineResources in PipelineTaskResourceBindings. -type PipelineDeclaredResource struct { - // Name is the name that will be used by the Pipeline to refer to this resource. - // It does not directly correspond to the name of any PipelineResources Task - // inputs or outputs, and it does not correspond to the actual names of the - // PipelineResources that will be bound in the PipelineRun. - Name string `json:"name"` - // Type is the type of the PipelineResource. - Type PipelineResourceType `json:"type"` - // Optional declares the resource as optional. - // optional: true - the resource is considered optional - // optional: false - the resource is considered required (default/equivalent of not specifying it) - Optional bool `json:"optional,omitempty"` -} - -// PipelineTaskResources allows a Pipeline to declare how its DeclaredPipelineResources -// should be provided to a Task as its inputs and outputs. -type PipelineTaskResources struct { - // Inputs holds the mapping from the PipelineResources declared in - // DeclaredPipelineResources to the input PipelineResources required by the Task. - // +listType=atomic - Inputs []PipelineTaskInputResource `json:"inputs,omitempty"` - // Outputs holds the mapping from the PipelineResources declared in - // DeclaredPipelineResources to the input PipelineResources required by the Task. - // +listType=atomic - Outputs []PipelineTaskOutputResource `json:"outputs,omitempty"` -} - -// PipelineTaskInputResource maps the name of a declared PipelineResource input -// dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources -// that should be used. This input may come from a previous task. -type PipelineTaskInputResource struct { - // Name is the name of the PipelineResource as declared by the Task. - Name string `json:"name"` - // Resource is the name of the DeclaredPipelineResource to use. - Resource string `json:"resource"` - // From is the list of PipelineTask names that the resource has to come from. - // (Implies an ordering in the execution graph.) - // +optional - // +listType=atomic - From []string `json:"from,omitempty"` -} - -// PipelineTaskOutputResource maps the name of a declared PipelineResource output -// dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources -// that should be used. -type PipelineTaskOutputResource struct { - // Name is the name of the PipelineResource as declared by the Task. - Name string `json:"name"` - // Resource is the name of the DeclaredPipelineResource to use. 
- Resource string `json:"resource"` -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PipelineList contains a list of Pipeline @@ -623,47 +311,3 @@ type PipelineList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []Pipeline `json:"items"` } - -func validateContainsExecutionStatusVariablesDisallowed(expressions []string, path string) (errs *apis.FieldError) { - if containsExecutionStatusReferences(expressions) { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline tasks can not refer to execution status"+ - " of any other pipeline task or aggregate status of tasks"), path)) - } - return errs -} - -func containsExecutionStatusReferences(expressions []string) bool { - // validate tasks.pipelineTask.status/tasks.status if this expression is not a result reference - if !LooksLikeContainsResultRefs(expressions) { - for _, e := range expressions { - // check if it contains context variable accessing execution status - $(tasks.taskname.status) - // or an aggregate status - $(tasks.status) - if containsExecutionStatusRef(e) { - return true - } - } - } - return false -} - -func validateExecutionStatusVariablesExpressions(expressions []string, ptNames sets.String, fieldPath string) (errs *apis.FieldError) { - // validate tasks.pipelineTask.status if this expression is not a result reference - if !LooksLikeContainsResultRefs(expressions) { - for _, expression := range expressions { - // its a reference to aggregate status of dag tasks - $(tasks.status) - if expression == PipelineTasksAggregateStatus { - continue - } - // check if it contains context variable accessing execution status - $(tasks.taskname.status) - if containsExecutionStatusRef(expression) { - // strip tasks. and .status from tasks.taskname.status to further verify task name - pt := strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".status") - // report an error if the task name does not exist in the list of dag tasks - if !ptNames.Has(pt) { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline task %s is not defined in the pipeline", pt), fieldPath)) - } - } - } - } - return errs -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go index d1c62c0036..c170e85a3b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go @@ -21,14 +21,16 @@ import ( "fmt" "strings" + "github.com/google/go-containerregistry/pkg/name" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" - "github.com/tektoncd/pipeline/pkg/list" + "github.com/tektoncd/pipeline/pkg/apis/version" "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" "github.com/tektoncd/pipeline/pkg/substitution" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" "knative.dev/pkg/apis" "knative.dev/pkg/webhook/resourcesemantics" ) @@ -57,11 +59,9 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { } // PipelineTask must have a valid unique label and at least one of taskRef or taskSpec should be specified errs = errs.Also(ValidatePipelineTasks(ctx, ps.Tasks, ps.Finally)) - // All declared resources should be used, and the Pipeline shouldn't try to use any 
resources - // that aren't declared - errs = errs.Also(validateDeclaredResources(ps.Resources, ps.Tasks, ps.Finally)) - // The from values should make sense - errs = errs.Also(validateFrom(ps.Tasks)) + if len(ps.Resources) > 0 { + errs = errs.Also(apis.ErrDisallowedFields("resources")) + } // Validate the pipeline task graph errs = errs.Also(validateGraph(ps.Tasks)) // The parameter variables should be valid @@ -95,6 +95,199 @@ func ValidatePipelineTasks(ctx context.Context, tasks []PipelineTask, finalTasks return errs } +// Validate a list of pipeline tasks including custom task and bundles +func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, path string) (errs *apis.FieldError) { + for i, t := range l { + // validate pipeline task name + errs = errs.Also(t.ValidateName().ViaFieldIndex(path, i)) + // names cannot be duplicated - checking that pipelineTask names are unique + if _, ok := taskNames[t.Name]; ok { + errs = errs.Also(apis.ErrMultipleOneOf("name").ViaFieldIndex(path, i)) + } + taskNames.Insert(t.Name) + // validate custom task, bundle, dag, or final task + errs = errs.Also(t.Validate(ctx).ViaFieldIndex(path, i)) + } + return errs +} + +// ValidateName checks whether the PipelineTask's name is a valid DNS label +func (pt PipelineTask) ValidateName() *apis.FieldError { + if err := validation.IsDNS1123Label(pt.Name); len(err) > 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("invalid value %q", pt.Name), + Paths: []string{"name"}, + Details: "Pipeline Task name must be a valid DNS Label." + + "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + } + } + return nil +} + +// Validate classifies whether a task is a custom task, bundle, or a regular task(dag/final) +// calls the validation routine based on the type of the task +func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { + errs = errs.Also(pt.validateRefOrSpec()) + + errs = errs.Also(pt.validateEmbeddedOrType()) + + if pt.Resources != nil { + errs = errs.Also(apis.ErrDisallowedFields("resources")) + } + + cfg := config.FromContextOrDefaults(ctx) + // Pipeline task having taskRef/taskSpec with APIVersion is classified as custom task + switch { + case pt.TaskRef != nil && pt.TaskRef.APIVersion != "": + errs = errs.Also(pt.validateCustomTask()) + case pt.TaskSpec != nil && pt.TaskSpec.APIVersion != "": + errs = errs.Also(pt.validateCustomTask()) + // If EnableTektonOCIBundles feature flag is on, validate bundle specifications + case cfg.FeatureFlags.EnableTektonOCIBundles && pt.TaskRef != nil && pt.TaskRef.Bundle != "": + errs = errs.Also(pt.validateBundle()) + default: + errs = errs.Also(pt.validateTask(ctx)) + } + return +} + +func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldError) { + if pt.IsMatrixed() { + // This is an alpha feature and will fail validation if it's used in a pipeline spec + // when the enable-api-fields feature gate is anything but "alpha". + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) + errs = errs.Also(pt.Matrix.validateCombinationsCount(ctx)) + } + errs = errs.Also(pt.Matrix.validateParameterInOneOfMatrixOrParams(pt.Params)) + errs = errs.Also(pt.Matrix.validateParams()) + return errs +} + +func (pt PipelineTask) validateEmbeddedOrType() (errs *apis.FieldError) { + // Reject cases where APIVersion and/or Kind are specified alongside an embedded Task. 
+ // We determine if this is an embedded Task by checking of TaskSpec.TaskSpec.Steps has items. + if pt.TaskSpec != nil && len(pt.TaskSpec.TaskSpec.Steps) > 0 { + if pt.TaskSpec.APIVersion != "" { + errs = errs.Also(&apis.FieldError{ + Message: "taskSpec.apiVersion cannot be specified when using taskSpec.steps", + Paths: []string{"taskSpec.apiVersion"}, + }) + } + if pt.TaskSpec.Kind != "" { + errs = errs.Also(&apis.FieldError{ + Message: "taskSpec.kind cannot be specified when using taskSpec.steps", + Paths: []string{"taskSpec.kind"}, + }) + } + } + return +} + +func (pt *PipelineTask) validateResultsFromMatrixedPipelineTasksNotConsumed(matrixedPipelineTasks sets.String) (errs *apis.FieldError) { + for _, ref := range PipelineTaskResultRefs(pt) { + if matrixedPipelineTasks.Has(ref.PipelineTask) { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("consuming results from matrixed task %s is not allowed", ref.PipelineTask), "")) + } + } + return errs +} + +func (pt *PipelineTask) validateWorkspaces(workspaceNames sets.String) (errs *apis.FieldError) { + workspaceBindingNames := sets.NewString() + for i, ws := range pt.Workspaces { + if workspaceBindingNames.Has(ws.Name) { + errs = errs.Also(apis.ErrGeneric( + fmt.Sprintf("workspace name %q must be unique", ws.Name), "").ViaFieldIndex("workspaces", i)) + } + + if ws.Workspace == "" { + if !workspaceNames.Has(ws.Name) { + errs = errs.Also(apis.ErrInvalidValue( + fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Name), + "", + ).ViaFieldIndex("workspaces", i)) + } + } else if !workspaceNames.Has(ws.Workspace) { + errs = errs.Also(apis.ErrInvalidValue( + fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Workspace), + "", + ).ViaFieldIndex("workspaces", i)) + } + + workspaceBindingNames.Insert(ws.Name) + } + return errs +} + +// validateRefOrSpec validates at least one of taskRef or taskSpec is specified +func (pt PipelineTask) validateRefOrSpec() (errs *apis.FieldError) { + // can't have both taskRef and taskSpec at the same time + if pt.TaskRef != nil && pt.TaskSpec != nil { + errs = errs.Also(apis.ErrMultipleOneOf("taskRef", "taskSpec")) + } + // Check that one of TaskRef and TaskSpec is present + if pt.TaskRef == nil && pt.TaskSpec == nil { + errs = errs.Also(apis.ErrMissingOneOf("taskRef", "taskSpec")) + } + return errs +} + +// validateCustomTask validates custom task specifications - checking kind and fail if not yet supported features specified +func (pt PipelineTask) validateCustomTask() (errs *apis.FieldError) { + if pt.TaskRef != nil && pt.TaskRef.Kind == "" { + errs = errs.Also(apis.ErrInvalidValue("custom task ref must specify kind", "taskRef.kind")) + } + if pt.TaskSpec != nil && pt.TaskSpec.Kind == "" { + errs = errs.Also(apis.ErrInvalidValue("custom task spec must specify kind", "taskSpec.kind")) + } + if pt.TaskRef != nil && pt.TaskRef.APIVersion == "" { + errs = errs.Also(apis.ErrInvalidValue("custom task ref must specify apiVersion", "taskRef.apiVersion")) + } + if pt.TaskSpec != nil && pt.TaskSpec.APIVersion == "" { + errs = errs.Also(apis.ErrInvalidValue("custom task spec must specify apiVersion", "taskSpec.apiVersion")) + } + return errs +} + +// validateBundle validates bundle specifications - checking name and bundle +func (pt PipelineTask) validateBundle() (errs *apis.FieldError) { + // bundle requires a TaskRef to be specified + if (pt.TaskRef != nil && pt.TaskRef.Bundle != "") && pt.TaskRef.Name == "" { 
+ errs = errs.Also(apis.ErrMissingField("taskRef.name")) + } + // If a bundle url is specified, ensure it is parsable + if pt.TaskRef != nil && pt.TaskRef.Bundle != "" { + if _, err := name.ParseReference(pt.TaskRef.Bundle); err != nil { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("invalid bundle reference (%s)", err.Error()), "taskRef.bundle")) + } + } + return errs +} + +// validateTask validates a pipeline task or a final task for taskRef and taskSpec +func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) { + cfg := config.FromContextOrDefaults(ctx) + // Validate TaskSpec if it's present + if pt.TaskSpec != nil { + errs = errs.Also(pt.TaskSpec.Validate(ctx).ViaField("taskSpec")) + } + if pt.TaskRef != nil { + if pt.TaskRef.Name != "" { + // TaskRef name must be a valid k8s name + if errSlice := validation.IsQualifiedName(pt.TaskRef.Name); len(errSlice) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "taskRef.name")) + } + } else if pt.TaskRef.Resolver == "" { + errs = errs.Also(apis.ErrInvalidValue("taskRef must specify name", "taskRef.name")) + } + // fail if bundle is present when EnableTektonOCIBundles feature flag is off (as it won't be allowed nor used) + if !cfg.FeatureFlags.EnableTektonOCIBundles && pt.TaskRef.Bundle != "" { + errs = errs.Also(apis.ErrDisallowedFields("taskRef.bundle")) + } + } + return errs +} + // validatePipelineWorkspacesDeclarations validates the specified workspaces, ensuring having unique name without any // empty string, func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration) (errs *apis.FieldError) { @@ -168,7 +361,7 @@ func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, pa for idx, task := range tasks { errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) if task.IsMatrixed() { - errs = errs.Also(validatePipelineParametersVariablesInMatrixParameters(task.Matrix.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) + errs = errs.Also(task.Matrix.validatePipelineParametersVariablesInMatrixParameters(prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) } errs = errs.Also(task.WhenExpressions.validatePipelineParametersVariables(prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) } @@ -189,14 +382,7 @@ func validatePipelineContextVariables(tasks []PipelineTask) *apis.FieldError { ) var paramValues []string for _, task := range tasks { - var matrixParams []Param - if task.IsMatrixed() { - matrixParams = task.Matrix.Params - } - for _, param := range append(task.Params, matrixParams...) { - paramValues = append(paramValues, param.Value.StringVal) - paramValues = append(paramValues, param.Value.ArrayVal...) - } + paramValues = task.extractAllParams().extractValues() } errs := validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipelineRun", pipelineRunContextNames). Also(validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipeline", pipelineContextNames)). 
@@ -204,6 +390,23 @@ func validatePipelineContextVariables(tasks []PipelineTask) *apis.FieldError { return errs } +// extractAllParams extracts all the parameters in a PipelineTask: +// - pt.Params +// - pt.Matrix.Params +// - pt.Matrix.Include.Params +func (pt *PipelineTask) extractAllParams() Params { + allParams := pt.Params + if pt.Matrix.HasParams() { + allParams = append(allParams, pt.Matrix.Params...) + } + if pt.Matrix.HasInclude() { + for _, include := range pt.Matrix.Include { + allParams = append(allParams, include.Params...) + } + } + return allParams +} + func containsExecutionStatusRef(p string) bool { if strings.HasPrefix(p, "tasks.") && strings.HasSuffix(p, ".status") { return true @@ -211,6 +414,12 @@ func containsExecutionStatusRef(p string) bool { return false } +func validateExecutionStatusVariables(tasks []PipelineTask, finallyTasks []PipelineTask) (errs *apis.FieldError) { + errs = errs.Also(validateExecutionStatusVariablesInTasks(tasks).ViaField("tasks")) + errs = errs.Also(validateExecutionStatusVariablesInFinally(PipelineTaskList(tasks).Names(), finallyTasks).ViaField("finally")) + return errs +} + // validate dag pipeline tasks, task params can not access execution status of any other task // dag tasks cannot have param value as $(tasks.pipelineTask.status) func validateExecutionStatusVariablesInTasks(tasks []PipelineTask) (errs *apis.FieldError) { @@ -229,9 +438,79 @@ func validateExecutionStatusVariablesInFinally(tasksNames sets.String, finally [ return errs } -func validateExecutionStatusVariables(tasks []PipelineTask, finallyTasks []PipelineTask) (errs *apis.FieldError) { - errs = errs.Also(validateExecutionStatusVariablesInTasks(tasks).ViaField("tasks")) - errs = errs.Also(validateExecutionStatusVariablesInFinally(PipelineTaskList(tasks).Names(), finallyTasks).ViaField("finally")) +func (pt *PipelineTask) validateExecutionStatusVariablesDisallowed() (errs *apis.FieldError) { + for _, param := range pt.Params { + if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok { + errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, "value"). + ViaFieldKey("params", param.Name)) + } + } + for i, we := range pt.WhenExpressions { + if expressions, ok := we.GetVarSubstitutionExpressions(); ok { + errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, ""). 
+ ViaFieldIndex("when", i)) + } + } + return errs +} + +func validateContainsExecutionStatusVariablesDisallowed(expressions []string, path string) (errs *apis.FieldError) { + if containsExecutionStatusReferences(expressions) { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline tasks can not refer to execution status"+ + " of any other pipeline task or aggregate status of tasks"), path)) + } + return errs +} + +func containsExecutionStatusReferences(expressions []string) bool { + // validate tasks.pipelineTask.status/tasks.status if this expression is not a result reference + if !LooksLikeContainsResultRefs(expressions) { + for _, e := range expressions { + // check if it contains context variable accessing execution status - $(tasks.taskname.status) + // or an aggregate status - $(tasks.status) + if containsExecutionStatusRef(e) { + return true + } + } + } + return false +} + +func (pt *PipelineTask) validateExecutionStatusVariablesAllowed(ptNames sets.String) (errs *apis.FieldError) { + for _, param := range pt.Params { + if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok { + errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, "value"). + ViaFieldKey("params", param.Name)) + } + } + for i, we := range pt.WhenExpressions { + if expressions, ok := we.GetVarSubstitutionExpressions(); ok { + errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, ""). + ViaFieldIndex("when", i)) + } + } + return errs +} + +func validateExecutionStatusVariablesExpressions(expressions []string, ptNames sets.String, fieldPath string) (errs *apis.FieldError) { + // validate tasks.pipelineTask.status if this expression is not a result reference + if !LooksLikeContainsResultRefs(expressions) { + for _, expression := range expressions { + // its a reference to aggregate status of dag tasks - $(tasks.status) + if expression == PipelineTasksAggregateStatus { + continue + } + // check if it contains context variable accessing execution status - $(tasks.taskname.status) + if containsExecutionStatusRef(expression) { + // strip tasks. and .status from tasks.taskname.status to further verify task name + pt := strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".status") + // report an error if the task name does not exist in the list of dag tasks + if !ptNames.Has(pt) { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline task %s is not defined in the pipeline", pt), fieldPath)) + } + } + } + } return errs } @@ -338,7 +617,6 @@ func validateFinalTasks(tasks []PipelineTask, finalTasks []PipelineTask) (errs * fts := PipelineTaskList(finalTasks).Names() errs = errs.Also(validateTaskResultReferenceInFinallyTasks(finalTasks, ts, fts)) - errs = errs.Also(validateTasksInputFrom(finalTasks).ViaField("finally")) return errs } @@ -378,22 +656,6 @@ func validateResultsVariablesExpressionsInFinally(expressions []string, pipeline return errs } -func validateTasksInputFrom(tasks []PipelineTask) (errs *apis.FieldError) { - for idx, t := range tasks { - inputResources := []PipelineTaskInputResource{} - if t.Resources != nil { - inputResources = append(inputResources, t.Resources.Inputs...) 
- } - for i, rd := range inputResources { - if len(rd.From) != 0 { - errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("no from allowed under inputs,"+ - " final task %s has from specified", rd.Name), "").ViaFieldIndex("inputs", i).ViaField("resources").ViaIndex(idx)) - } - } - } - return errs -} - func validateWhenExpressions(tasks []PipelineTask, finalTasks []PipelineTask) (errs *apis.FieldError) { for i, t := range tasks { errs = errs.Also(t.WhenExpressions.validate().ViaFieldIndex("tasks", i)) @@ -404,93 +666,6 @@ func validateWhenExpressions(tasks []PipelineTask, finalTasks []PipelineTask) (e return errs } -// validateDeclaredResources ensures that the specified resources have unique names and -// validates that all the resources referenced by pipeline tasks are declared in the pipeline -func validateDeclaredResources(resources []PipelineDeclaredResource, tasks []PipelineTask, finalTasks []PipelineTask) *apis.FieldError { - encountered := sets.NewString() - for _, r := range resources { - if encountered.Has(r.Name) { - return apis.ErrInvalidValue(fmt.Sprintf("resource with name %q appears more than once", r.Name), "resources") - } - encountered.Insert(r.Name) - } - required := []string{} - for _, t := range tasks { - if t.Resources != nil { - for _, input := range t.Resources.Inputs { - required = append(required, input.Resource) - } - for _, output := range t.Resources.Outputs { - required = append(required, output.Resource) - } - } - } - for _, t := range finalTasks { - if t.Resources != nil { - for _, input := range t.Resources.Inputs { - required = append(required, input.Resource) - } - for _, output := range t.Resources.Outputs { - required = append(required, output.Resource) - } - } - } - - provided := make([]string, 0, len(resources)) - for _, resource := range resources { - provided = append(provided, resource.Name) - } - missing := list.DiffLeft(required, provided) - if len(missing) > 0 { - return apis.ErrInvalidValue(fmt.Sprintf("pipeline declared resources didn't match usage in Tasks: Didn't provide required values: %s", missing), "resources") - } - return nil -} - -func isOutput(outputs []PipelineTaskOutputResource, resource string) bool { - for _, output := range outputs { - if output.Resource == resource { - return true - } - } - return false -} - -// validateFrom ensures that the `from` values make sense: that they rely on values from Tasks -// that ran previously, and that the PipelineResource is actually an output of the Task it should come from. -func validateFrom(tasks []PipelineTask) (errs *apis.FieldError) { - taskOutputs := map[string][]PipelineTaskOutputResource{} - for _, task := range tasks { - var to []PipelineTaskOutputResource - if task.Resources != nil { - to = make([]PipelineTaskOutputResource, len(task.Resources.Outputs)) - copy(to, task.Resources.Outputs) - } - taskOutputs[task.Name] = to - } - for i, t := range tasks { - inputResources := []PipelineTaskInputResource{} - if t.Resources != nil { - inputResources = append(inputResources, t.Resources.Inputs...) 
- } - - for j, rd := range inputResources { - for _, pt := range rd.From { - outputs, found := taskOutputs[pt] - if !found { - return apis.ErrInvalidValue(fmt.Sprintf("expected resource %s to be from task %s, but task %s doesn't exist", rd.Resource, pt, pt), - "from").ViaFieldIndex("inputs", j).ViaField("resources").ViaFieldIndex("tasks", i) - } - if !isOutput(outputs, rd.Resource) { - return apis.ErrInvalidValue(fmt.Sprintf("the resource %s from %s must be an output but is an input", rd.Resource, pt), - "from").ViaFieldIndex("inputs", j).ViaField("resources").ViaFieldIndex("tasks", i) - } - } - } - } - return errs -} - // validateGraph ensures the Pipeline's dependency Graph (DAG) make sense: that there is no dependency // cycle or that they rely on values from Tasks that ran previously, and that the PipelineResource // is actually an output of the Task it should come from. @@ -523,3 +698,51 @@ func validateResultsFromMatrixedPipelineTasksNotConsumed(tasks []PipelineTask, f } return errs } + +// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. +// error is returned when the array indexing reference is out of bound of the array param +// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. +func (ps *PipelineSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { + if !config.CheckAlphaOrBetaAPIFields(ctx) { + return nil + } + + // Collect all array params lengths + arrayParamsLengths := ps.Params.extractParamArrayLengths() + for k, v := range params.extractParamArrayLengths() { + arrayParamsLengths[k] = v + } + + paramsRefs := []string{} + for i := range ps.Tasks { + paramsRefs = append(paramsRefs, ps.Tasks[i].Params.extractValues()...) + if ps.Tasks[i].IsMatrixed() { + paramsRefs = append(paramsRefs, ps.Tasks[i].Matrix.Params.extractValues()...) + } + for j := range ps.Tasks[i].Workspaces { + paramsRefs = append(paramsRefs, ps.Tasks[i].Workspaces[j].SubPath) + } + for _, wes := range ps.Tasks[i].WhenExpressions { + paramsRefs = append(paramsRefs, wes.Input) + paramsRefs = append(paramsRefs, wes.Values...) + } + } + + for i := range ps.Finally { + paramsRefs = append(paramsRefs, ps.Finally[i].Params.extractValues()...) + if ps.Finally[i].IsMatrixed() { + paramsRefs = append(paramsRefs, ps.Finally[i].Matrix.Params.extractValues()...) + } + for _, wes := range ps.Finally[i].WhenExpressions { + paramsRefs = append(paramsRefs, wes.Values...) + } + } + + // extract all array indexing references, for example []{"$(params.array-params[1])"} + arrayIndexParamRefs := []string{} + for _, p := range paramsRefs { + arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) + } + + return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_conversion.go index d2c7d4bda4..88fed430cc 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_conversion.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package v1beta1 import ( @@ -32,7 +48,7 @@ func (pr PipelineRef) convertBundleToResolver(sink *v1.PipelineRef) { if pr.Bundle != "" { sink.ResolverRef = v1.ResolverRef{ Resolver: "bundles", - Params: []v1.Param{{ + Params: v1.Params{{ Name: "bundle", Value: v1.ParamValue{StringVal: pr.Bundle, Type: v1.ParamTypeString}, }, { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_types.go index efb6f13cd3..ab943a3242 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_types.go @@ -24,6 +24,7 @@ type PipelineRef struct { // +optional APIVersion string `json:"apiVersion,omitempty"` // Bundle url reference to a Tekton Bundle. + // // Deprecated: Please use ResolverRef with the bundles resolver instead. // +optional Bundle string `json:"bundle,omitempty"` diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go index c52e2d1de3..6186c177a2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go @@ -64,7 +64,7 @@ func (ref *PipelineRef) Validate(ctx context.Context) (errs *apis.FieldError) { } } } - return + return //nolint:nakedret } func validateBundleFeatureFlag(ctx context.Context, featureName string, wantValue bool) *apis.FieldError { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go index 1e0ddf01ab..68a475233f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go @@ -21,8 +21,6 @@ import ( "fmt" v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" - "github.com/tektoncd/pipeline/pkg/apis/version" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" ) @@ -36,9 +34,6 @@ func (pr *PipelineRun) ConvertTo(ctx context.Context, to apis.Convertible) error switch sink := to.(type) { case *v1.PipelineRun: sink.ObjectMeta = pr.ObjectMeta - if err := serializePipelineRunResources(&sink.ObjectMeta, &pr.Spec); err != nil { - return err - } if err := pr.Status.convertTo(ctx, &sink.Status); err != nil { return err } @@ -99,9 +94,6 @@ func (pr *PipelineRun) ConvertFrom(ctx context.Context, from apis.Convertible) e switch source := from.(type) { case *v1.PipelineRun: pr.ObjectMeta = source.ObjectMeta - if err := deserializePipelineRunResources(&pr.ObjectMeta, &pr.Spec); err != nil { - return err - } if err := pr.Status.convertFrom(ctx, &source.Status); err != nil { return err } @@ -351,22 +343,3 @@ func (csr *ChildStatusReference) convertFrom(ctx context.Context, source v1.Chil csr.WhenExpressions = 
append(csr.WhenExpressions, new) } } - -func serializePipelineRunResources(meta *metav1.ObjectMeta, spec *PipelineRunSpec) error { - if spec.Resources == nil { - return nil - } - return version.SerializeToMetadata(meta, spec.Resources, resourcesAnnotationKey) -} - -func deserializePipelineRunResources(meta *metav1.ObjectMeta, spec *PipelineRunSpec) error { - resources := []PipelineResourceBinding{} - err := version.DeserializeFromMetadata(meta, &resources, resourcesAnnotationKey) - if err != nil { - return err - } - if len(resources) != 0 { - spec.Resources = resources - } - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go index a47a0d3e55..86592824a2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go @@ -36,6 +36,10 @@ func (pr *PipelineRun) SetDefaults(ctx context.Context) { // SetDefaults implements apis.Defaultable func (prs *PipelineRunSpec) SetDefaults(ctx context.Context) { cfg := config.FromContextOrDefaults(ctx) + if prs.PipelineRef != nil && prs.PipelineRef.Name == "" && prs.PipelineRef.Resolver == "" { + prs.PipelineRef.Resolver = ResolverName(cfg.Defaults.DefaultResolverType) + } + if prs.Timeout == nil && prs.Timeouts == nil { prs.Timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go index abfb42d476..83bf2c22e1 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go @@ -18,16 +18,16 @@ package v1beta1 import ( "context" + "fmt" "time" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "github.com/tektoncd/pipeline/pkg/apis/config" apisconfig "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/clock" @@ -156,6 +156,22 @@ func (pr *PipelineRun) GetNamespacedName() types.NamespacedName { return types.NamespacedName{Namespace: pr.Namespace, Name: pr.Name} } +// IsTimeoutConditionSet returns true when the pipelinerun has the pipelinerun timed out reason +func (pr *PipelineRun) IsTimeoutConditionSet() bool { + condition := pr.Status.GetCondition(apis.ConditionSucceeded) + return condition.IsFalse() && condition.Reason == PipelineRunReasonTimedOut.String() +} + +// SetTimeoutCondition sets the status of the PipelineRun to timed out. 
+func (pr *PipelineRun) SetTimeoutCondition(ctx context.Context) { + pr.Status.SetCondition(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: PipelineRunReasonTimedOut.String(), + Message: fmt.Sprintf("PipelineRun %q failed to finish within %q", pr.Name, pr.PipelineTimeout(ctx).String()), + }) +} + // HasTimedOut returns true if a pipelinerun has exceeded its spec.Timeout based on its status.Timeout func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bool { timeout := pr.PipelineTimeout(ctx) @@ -173,6 +189,19 @@ func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bo return false } +// HasTimedOutForALongTime returns true if a pipelinerun has exceeed its spec.Timeout based its status.StartTime +// by a large margin +func (pr *PipelineRun) HasTimedOutForALongTime(ctx context.Context, c clock.PassiveClock) bool { + if !pr.HasTimedOut(ctx, c) { + return false + } + timeout := pr.PipelineTimeout(ctx) + startTime := pr.Status.StartTime + runtime := c.Since(startTime.Time) + // We are arbitrarily defining large margin as doubling the spec.timeout + return runtime >= 2*timeout +} + // HaveTasksTimedOut returns true if a pipelinerun has exceeded its spec.Timeouts.Tasks func (pr *PipelineRun) HaveTasksTimedOut(ctx context.Context, c clock.PassiveClock) bool { timeout := pr.TasksTimeout() @@ -227,11 +256,13 @@ type PipelineRunSpec struct { // Resources is a list of bindings specifying which actual instances of // PipelineResources to use for the resources the Pipeline has declared // it needs. + // + // Deprecated: Unused, preserved only for backwards compatibility // +listType=atomic Resources []PipelineResourceBinding `json:"resources,omitempty"` // Params is a list of parameter names and values. // +listType=atomic - Params []Param `json:"params,omitempty"` + Params Params `json:"params,omitempty"` // +optional ServiceAccountName string `json:"serviceAccountName,omitempty"` @@ -245,9 +276,12 @@ type PipelineRunSpec struct { // +optional Timeouts *TimeoutFields `json:"timeouts,omitempty"` - // Timeout Deprecated: use pipelineRunSpec.Timeouts.Pipeline instead - // Time after which the Pipeline times out. Defaults to never. + // Timeout is the Time after which the Pipeline times out. + // Defaults to never. // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration + // + // Deprecated: use pipelineRunSpec.Timeouts.Pipeline instead + // // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` // PodTemplate holds pod specific configuration @@ -416,6 +450,22 @@ type PipelineRunStatusFields struct { // CompletionTime is the time the PipelineRun completed. CompletionTime *metav1.Time `json:"completionTime,omitempty"` + // TaskRuns is a map of PipelineRunTaskRunStatus with the taskRun name as the key. + // + // Deprecated: use ChildReferences instead. As of v0.45.0, this field is no + // longer populated and is only included for backwards compatibility with + // older server versions. + // +optional + TaskRuns map[string]*PipelineRunTaskRunStatus `json:"taskRuns,omitempty"` + + // Runs is a map of PipelineRunRunStatus with the run name as the key + // + // Deprecated: use ChildReferences instead. As of v0.45.0, this field is no + // longer populated and is only included for backwards compatibility with + // older server versions. 
+ // +optional + Runs map[string]*PipelineRunRunStatus `json:"runs,omitempty"` + // PipelineResults are the list of results written out by the pipeline task's containers // +optional // +listType=atomic @@ -482,6 +532,8 @@ const ( TasksTimedOutSkip SkippingReason = "PipelineRun Tasks timeout has been reached" // FinallyTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Finally. FinallyTimedOutSkip SkippingReason = "PipelineRun Finally timeout has been reached" + // EmptyArrayInMatrixParams means the task was skipped because Matrix parameters contain empty array. + EmptyArrayInMatrixParams SkippingReason = "Matrix Parameters have an empty array" // None means the task was not skipped None SkippingReason = "None" ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go index 4469f902a8..ed7ac5cce1 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go @@ -131,6 +131,9 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) if ps.PodTemplate != nil { errs = errs.Also(validatePodTemplateEnv(ctx, *ps.PodTemplate)) } + if ps.Resources != nil { + errs = errs.Also(apis.ErrDisallowedFields("resources")) + } return errs } @@ -239,7 +242,7 @@ func (ps *PipelineRunSpec) validateInlineParameters(ctx context.Context) (errs * return errs } -func appendPipelineTaskParams(paramSpecForValidation map[string]ParamSpec, params []Param) map[string]ParamSpec { +func appendPipelineTaskParams(paramSpecForValidation map[string]ParamSpec, params Params) map[string]ParamSpec { for _, p := range params { if pSpec, ok := paramSpecForValidation[p.Name]; ok { if p.Value.ObjectVal != nil { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/provenance.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/provenance.go index 1cb8fca722..3ae27eb55d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/provenance.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/provenance.go @@ -15,37 +15,53 @@ package v1beta1 import "github.com/tektoncd/pipeline/pkg/apis/config" -// Provenance contains some key authenticated metadata about how a software artifact was -// built (what sources, what inputs/outputs, etc.). For now, it only contains the subfield -// `ConfigSource` that identifies the source where a build config file came from. -// In future, it can be expanded as needed to include more metadata about the build. -// This field aims to be used to carry minimum amount of the authenticated metadata in *Run status -// so that Tekton Chains can pick it up and record in the provenance it generates. +// Provenance contains metadata about resources used in the TaskRun/PipelineRun +// such as the source from where a remote build definition was fetched. +// This field aims to carry minimum amoumt of metadata in *Run status so that +// Tekton Chains can capture them in the provenance. type Provenance struct { - // ConfigSource identifies the source where a resource came from. + // Deprecated: Use RefSource instead ConfigSource *ConfigSource `json:"configSource,omitempty"` + // RefSource identifies the source where a remote task/pipeline came from. 
+ RefSource *RefSource `json:"refSource,omitempty"` + // FeatureFlags identifies the feature flags that were used during the task/pipeline run FeatureFlags *config.FeatureFlags `json:"featureFlags,omitempty"` } -// ConfigSource identifies the source where a resource came from. -// This can include Git repositories, Task Bundles, file checksums, or other information -// that allows users to identify where the resource came from and what version was used. +// RefSource contains the information that can uniquely identify where a remote +// built definition came from i.e. Git repositories, Tekton Bundles in OCI registry +// and hub. +type RefSource struct { + // URI indicates the identity of the source of the build definition. + // Example: "https://github.com/tektoncd/catalog" + URI string `json:"uri,omitempty"` + + // Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. + // Example: {"sha1": "f99d13e554ffcb696dee719fa85b695cb5b0f428"} + Digest map[string]string `json:"digest,omitempty"` + + // EntryPoint identifies the entry point into the build. This is often a path to a + // build definition file and/or a target label within that file. + // Example: "task/git-clone/0.8/git-clone.yaml" + EntryPoint string `json:"entryPoint,omitempty"` +} + +// ConfigSource contains the information that can uniquely identify where a remote +// built definition came from i.e. Git repositories, Tekton Bundles in OCI registry +// and hub. type ConfigSource struct { - // URI indicates the identity of the source of the config. - // Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.uri + // URI indicates the identity of the source of the build definition. // Example: "https://github.com/tektoncd/catalog" URI string `json:"uri,omitempty"` // Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. - // Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.digest // Example: {"sha1": "f99d13e554ffcb696dee719fa85b695cb5b0f428"} Digest map[string]string `json:"digest,omitempty"` // EntryPoint identifies the entry point into the build. This is often a path to a - // configuration file and/or a target label within that file. - // Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.entryPoint + // build definition file and/or a target label within that file. 
// Example: "task/git-clone/0.8/git-clone.yaml" EntryPoint string `json:"entryPoint,omitempty"` } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/provenance_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/provenance_conversion.go index 4f68e9dac5..4e4afe25b5 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/provenance_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/provenance_conversion.go @@ -20,10 +20,10 @@ import ( ) func (p Provenance) convertTo(ctx context.Context, sink *v1.Provenance) { - if p.ConfigSource != nil { - new := v1.ConfigSource{} - p.ConfigSource.convertTo(ctx, &new) - sink.ConfigSource = &new + if p.RefSource != nil { + new := v1.RefSource{} + p.RefSource.convertTo(ctx, &new) + sink.RefSource = &new } if p.FeatureFlags != nil { sink.FeatureFlags = p.FeatureFlags @@ -31,23 +31,23 @@ func (p Provenance) convertTo(ctx context.Context, sink *v1.Provenance) { } func (p *Provenance) convertFrom(ctx context.Context, source v1.Provenance) { - if source.ConfigSource != nil { - new := ConfigSource{} - new.convertFrom(ctx, *source.ConfigSource) - p.ConfigSource = &new + if source.RefSource != nil { + new := RefSource{} + new.convertFrom(ctx, *source.RefSource) + p.RefSource = &new } if source.FeatureFlags != nil { p.FeatureFlags = source.FeatureFlags } } -func (cs ConfigSource) convertTo(ctx context.Context, sink *v1.ConfigSource) { +func (cs RefSource) convertTo(ctx context.Context, sink *v1.RefSource) { sink.URI = cs.URI sink.Digest = cs.Digest sink.EntryPoint = cs.EntryPoint } -func (cs *ConfigSource) convertFrom(ctx context.Context, source v1.ConfigSource) { +func (cs *RefSource) convertFrom(ctx context.Context, source v1.RefSource) { cs.URI = source.URI cs.Digest = source.Digest cs.EntryPoint = source.EntryPoint diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go index 18d3c07bb6..3bbed85032 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package v1beta1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_types.go index 1cb0c85fe2..70b1c78867 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_types.go @@ -33,5 +33,5 @@ type ResolverRef struct { // the chosen resolver. 
// +optional // +listType=atomic - Params []Param `json:"params,omitempty"` + Params Params `json:"params,omitempty"` } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_paths.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_paths.go deleted file mode 100644 index 260a2ed5f9..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_paths.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - Copyright 2019 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v1beta1 - -import "path/filepath" - -// InputResourcePath returns the path where the given input resource -// will get mounted in a Pod -func InputResourcePath(r ResourceDeclaration) string { - return path("/workspace", r) -} - -// OutputResourcePath returns the path to the output resource in a Pod -func OutputResourcePath(r ResourceDeclaration) string { - return path("/workspace/output", r) -} - -func path(root string, r ResourceDeclaration) string { - if r.TargetPath != "" { - if filepath.IsAbs(r.TargetPath) { - return r.TargetPath - } - return filepath.Join("/workspace", r.TargetPath) - } - return filepath.Join(root, r.Name) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types.go index 41ad57bd34..0e5ec62de3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types.go @@ -17,41 +17,59 @@ limitations under the License. package v1beta1 import ( - "encoding/json" - "fmt" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/go-multierror" resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" + "github.com/tektoncd/pipeline/pkg/result" v1 "k8s.io/api/core/v1" ) -// PipelineResourceType represents the type of endpoint the pipelineResource is, so that the -// controller will know this pipelineResource should be fetched and optionally what -// additional metatdata should be provided for it. -type PipelineResourceType = resource.PipelineResourceType +// RunResult is used to write key/value pairs to TaskRun pod termination messages. +// It has been migrated to the result package and kept for backward compatibility +type RunResult = result.RunResult -var ( - // AllowedOutputResources are the resource types that can be used as outputs - AllowedOutputResources = resource.AllowedOutputResources -) +// PipelineResourceResult has been deprecated with the migration of PipelineResources +// Deprecated: Use RunResult instead +type PipelineResourceResult = result.RunResult -const ( - // PipelineResourceTypeGit indicates that this source is a GitHub repo. 
- PipelineResourceTypeGit PipelineResourceType = resource.PipelineResourceTypeGit +// ResultType of PipelineResourceResult has been deprecated with the migration of PipelineResources +// Deprecated: v1beta1.ResultType is only kept for backward compatibility +type ResultType = result.ResultType - // PipelineResourceTypeStorage indicates that this source is a storage blob resource. - PipelineResourceTypeStorage PipelineResourceType = resource.PipelineResourceTypeStorage +// ResourceParam declares a string value to use for the parameter called Name, and is used in +// the specific context of PipelineResources. +// +// Deprecated: Unused, preserved only for backwards compatibility +type ResourceParam = resource.ResourceParam - // PipelineResourceTypeImage indicates that this source is a docker Image. - PipelineResourceTypeImage PipelineResourceType = resource.PipelineResourceTypeImage -) +// PipelineResourceType represents the type of endpoint the pipelineResource is, so that the +// controller will know this pipelineResource should be fetched and optionally what +// additional metatdata should be provided for it. +// +// Deprecated: Unused, preserved only for backwards compatibility +type PipelineResourceType = resource.PipelineResourceType -// AllResourceTypes can be used for validation to check if a provided Resource type is one of the known types. -var AllResourceTypes = resource.AllResourceTypes +// PipelineDeclaredResource is used by a Pipeline to declare the types of the +// PipelineResources that it will required to run and names which can be used to +// refer to these PipelineResources in PipelineTaskResourceBindings. +// +// Deprecated: Unused, preserved only for backwards compatibility +type PipelineDeclaredResource struct { + // Name is the name that will be used by the Pipeline to refer to this resource. + // It does not directly correspond to the name of any PipelineResources Task + // inputs or outputs, and it does not correspond to the actual names of the + // PipelineResources that will be bound in the PipelineRun. + Name string `json:"name"` + // Type is the type of the PipelineResource. + Type PipelineResourceType `json:"type"` + // Optional declares the resource as optional. + // optional: true - the resource is considered optional + // optional: false - the resource is considered required (default/equivalent of not specifying it) + Optional bool `json:"optional,omitempty"` +} // TaskResources allows a Pipeline to declare how its DeclaredPipelineResources // should be provided to a Task as its inputs and outputs. +// +// Deprecated: Unused, preserved only for backwards compatibility type TaskResources struct { // Inputs holds the mapping from the PipelineResources declared in // DeclaredPipelineResources to the input PipelineResources required by the Task. @@ -68,11 +86,15 @@ type TaskResources struct { // the Task definition, and when provided as an Input, the Name will be the // path to the volume mounted containing this Resource as an input (e.g. // an input Resource named `workspace` will be mounted at `/workspace`). 
+// +// Deprecated: Unused, preserved only for backwards compatibility type TaskResource struct { ResourceDeclaration `json:",inline"` } // TaskRunResources allows a TaskRun to declare inputs and outputs TaskResourceBinding +// +// Deprecated: Unused, preserved only for backwards compatibility type TaskRunResources struct { // Inputs holds the inputs resources this task was invoked with // +listType=atomic @@ -84,6 +106,8 @@ type TaskRunResources struct { // TaskResourceBinding points to the PipelineResource that // will be used for the Task input or output called Name. +// +// Deprecated: Unused, preserved only for backwards compatibility type TaskResourceBinding struct { PipelineResourceBinding `json:",inline"` // Paths will probably be removed in #1284, and then PipelineResourceBinding can be used instead. @@ -99,10 +123,14 @@ type TaskResourceBinding struct { // PipelineResources within the type's definition, and when provided as an Input, the Name will be the // path to the volume mounted containing this PipelineResource as an input (e.g. // an input Resource named `workspace` will be mounted at `/workspace`). +// +// Deprecated: Unused, preserved only for backwards compatibility type ResourceDeclaration = resource.ResourceDeclaration // PipelineResourceBinding connects a reference to an instance of a PipelineResource // with a PipelineResource dependency that the Pipeline has declared +// +// Deprecated: Unused, preserved only for backwards compatibility type PipelineResourceBinding struct { // Name is the name of the PipelineResource in the Pipeline's declaration Name string `json:"name,omitempty"` @@ -117,58 +145,74 @@ type PipelineResourceBinding struct { ResourceSpec *resource.PipelineResourceSpec `json:"resourceSpec,omitempty"` } -// PipelineResourceResult used to export the image name and digest as json -type PipelineResourceResult struct { - Key string `json:"key"` - Value string `json:"value"` - ResourceName string `json:"resourceName,omitempty"` - ResultType ResultType `json:"type,omitempty"` +// PipelineTaskResources allows a Pipeline to declare how its DeclaredPipelineResources +// should be provided to a Task as its inputs and outputs. +// +// Deprecated: Unused, preserved only for backwards compatibility +type PipelineTaskResources struct { + // Inputs holds the mapping from the PipelineResources declared in + // DeclaredPipelineResources to the input PipelineResources required by the Task. + // +listType=atomic + Inputs []PipelineTaskInputResource `json:"inputs,omitempty"` + // Outputs holds the mapping from the PipelineResources declared in + // DeclaredPipelineResources to the input PipelineResources required by the Task. + // +listType=atomic + Outputs []PipelineTaskOutputResource `json:"outputs,omitempty"` } -// ResultType used to find out whether a PipelineResourceResult is from a task result or not -// Note that ResultsType is another type which is used to define the data type -// (e.g. string, array, etc) we used for Results -type ResultType int - -// UnmarshalJSON unmarshals either an int or a string into a ResultType. String -// ResultTypes were removed because they made JSON messages bigger, which in -// turn limited the amount of space in termination messages for task results. 
String -// support is maintained for backwards compatibility - the Pipelines controller could -// be stopped midway through TaskRun execution, updated with support for int in place -// of string, and then fail the running TaskRun because it doesn't know how to interpret -// the string value that the TaskRun's entrypoint will emit when it completes. -func (r *ResultType) UnmarshalJSON(data []byte) error { - var asInt int - var intErr error - - if err := json.Unmarshal(data, &asInt); err != nil { - intErr = err - } else { - *r = ResultType(asInt) - return nil - } - - var asString string +// PipelineTaskInputResource maps the name of a declared PipelineResource input +// dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources +// that should be used. This input may come from a previous task. +// +// Deprecated: Unused, preserved only for backwards compatibility +type PipelineTaskInputResource struct { + // Name is the name of the PipelineResource as declared by the Task. + Name string `json:"name"` + // Resource is the name of the DeclaredPipelineResource to use. + Resource string `json:"resource"` + // From is the list of PipelineTask names that the resource has to come from. + // (Implies an ordering in the execution graph.) + // +optional + // +listType=atomic + From []string `json:"from,omitempty"` +} - if err := json.Unmarshal(data, &asString); err != nil { - return fmt.Errorf("unsupported value type, neither int nor string: %v", multierror.Append(intErr, err).ErrorOrNil()) - } +// PipelineTaskOutputResource maps the name of a declared PipelineResource output +// dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources +// that should be used. +// +// Deprecated: Unused, preserved only for backwards compatibility +type PipelineTaskOutputResource struct { + // Name is the name of the PipelineResource as declared by the Task. + Name string `json:"name"` + // Resource is the name of the DeclaredPipelineResource to use. + Resource string `json:"resource"` +} - switch asString { - case "TaskRunResult": - *r = TaskRunResultType - case "PipelineResourceResult": - *r = PipelineResourceResultType - case "InternalTektonResult": - *r = InternalTektonResultType - default: - *r = UnknownResultType - } +// TaskRunInputs holds the input values that this task was invoked with. +// +// Deprecated: Unused, preserved only for backwards compatibility +type TaskRunInputs struct { + // +optional + // +listType=atomic + Resources []TaskResourceBinding `json:"resources,omitempty"` + // +optional + // +listType=atomic + Params []Param `json:"params,omitempty"` +} - return nil +// TaskRunOutputs holds the output values that this task was invoked with. 
+// +// Deprecated: Unused, preserved only for backwards compatibility +type TaskRunOutputs struct { + // +optional + // +listType=atomic + Resources []TaskResourceBinding `json:"resources,omitempty"` } // PipelineResourceRef can be used to refer to a specific instance of a Resource +// +// Deprecated: Unused, preserved only for backwards compatibility type PipelineResourceRef struct { // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name,omitempty"` @@ -178,6 +222,8 @@ type PipelineResourceRef struct { } // PipelineResourceInterface interface to be implemented by different PipelineResource types +// +// Deprecated: Unused, preserved only for backwards compatibility type PipelineResourceInterface interface { // GetName returns the name of this PipelineResource instance. GetName() string @@ -195,6 +241,8 @@ type PipelineResourceInterface interface { } // TaskModifier is an interface to be implemented by different PipelineResources +// +// Deprecated: Unused, preserved only for backwards compatibility type TaskModifier interface { GetStepsToPrepend() []Step GetStepsToAppend() []Step @@ -202,6 +250,8 @@ type TaskModifier interface { } // InternalTaskModifier implements TaskModifier for resources that are built-in to Tekton Pipelines. +// +// Deprecated: Unused, preserved only for backwards compatibility type InternalTaskModifier struct { // +listType=atomic StepsToPrepend []Step `json:"stepsToPrepend"` @@ -210,69 +260,3 @@ type InternalTaskModifier struct { // +listType=atomic Volumes []v1.Volume `json:"volumes"` } - -// GetStepsToPrepend returns a set of Steps to prepend to the Task. -func (tm *InternalTaskModifier) GetStepsToPrepend() []Step { - return tm.StepsToPrepend -} - -// GetStepsToAppend returns a set of Steps to append to the Task. -func (tm *InternalTaskModifier) GetStepsToAppend() []Step { - return tm.StepsToAppend -} - -// GetVolumes returns a set of Volumes to prepend to the Task pod. -func (tm *InternalTaskModifier) GetVolumes() []v1.Volume { - return tm.Volumes -} - -// ApplyTaskModifier applies a modifier to the task by appending and prepending steps and volumes. -// If steps with the same name exist in ts an error will be returned. If identical Volumes have -// been added, they will not be added again. If Volumes with the same name but different contents -// have been added, an error will be returned. -func ApplyTaskModifier(ts *TaskSpec, tm TaskModifier) error { - steps := tm.GetStepsToPrepend() - for _, step := range steps { - if err := checkStepNotAlreadyAdded(step, ts.Steps); err != nil { - return err - } - } - ts.Steps = append(steps, ts.Steps...) - - steps = tm.GetStepsToAppend() - for _, step := range steps { - if err := checkStepNotAlreadyAdded(step, ts.Steps); err != nil { - return err - } - } - ts.Steps = append(ts.Steps, steps...) 
- - volumes := tm.GetVolumes() - for _, volume := range volumes { - var alreadyAdded bool - for _, v := range ts.Volumes { - if volume.Name == v.Name { - // If a Volume with the same name but different contents has already been added, we can't add both - if d := cmp.Diff(volume, v); d != "" { - return fmt.Errorf("tried to add volume %s already added but with different contents", volume.Name) - } - // If an identical Volume has already been added, don't add it again - alreadyAdded = true - } - } - if !alreadyAdded { - ts.Volumes = append(ts.Volumes, volume) - } - } - - return nil -} - -func checkStepNotAlreadyAdded(s Step, steps []Step) error { - for _, step := range steps { - if s.Name == step.Name { - return fmt.Errorf("Step %s cannot be added again", step.Name) - } - } - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go deleted file mode 100644 index 30bad216ae..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "context" - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/util/sets" - "knative.dev/pkg/apis" -) - -// Validate implements apis.Validatable -func (tr *TaskResources) Validate(context.Context) (errs *apis.FieldError) { - if tr != nil { - errs = errs.Also(validateTaskResources(tr.Inputs).ViaField("inputs")) - errs = errs.Also(validateTaskResources(tr.Outputs).ViaField("outputs")) - } - return errs -} - -func validateTaskResources(resources []TaskResource) (errs *apis.FieldError) { - for idx, resource := range resources { - errs = errs.Also(validateResourceType(resource, fmt.Sprintf("%s.type", resource.Name))).ViaIndex(idx) - } - return errs.Also(checkForDuplicates(resources, "name")) -} - -func checkForDuplicates(resources []TaskResource, path string) *apis.FieldError { - encountered := sets.NewString() - for _, r := range resources { - if encountered.Has(strings.ToLower(r.Name)) { - return apis.ErrMultipleOneOf(path) - } - encountered.Insert(strings.ToLower(r.Name)) - } - return nil -} - -func validateResourceType(r TaskResource, path string) *apis.FieldError { - for _, allowed := range AllResourceTypes { - if r.Type == allowed { - return nil - } - } - return apis.ErrInvalidValue(r.Type, path) -} - -// Validate implements apis.Validatable -func (tr *TaskRunResources) Validate(ctx context.Context) *apis.FieldError { - if tr == nil { - return nil - } - if err := validateTaskRunResources(ctx, tr.Inputs, "spec.resources.inputs.name"); err != nil { - return err - } - return validateTaskRunResources(ctx, tr.Outputs, "spec.resources.outputs.name") -} - -// validateTaskRunResources validates that -// 1. resource is not declared more than once -// 2. if both resource reference and resource spec is defined at the same time -// 3. 
at least resource ref or resource spec is defined -func validateTaskRunResources(ctx context.Context, resources []TaskResourceBinding, path string) *apis.FieldError { - encountered := sets.NewString() - for _, r := range resources { - // We should provide only one binding for each resource required by the Task. - name := strings.ToLower(r.Name) - if encountered.Has(strings.ToLower(name)) { - return apis.ErrMultipleOneOf(path) - } - encountered.Insert(name) - // Check that both resource ref and resource Spec are not present - if r.ResourceRef != nil && r.ResourceSpec != nil { - return apis.ErrDisallowedFields(fmt.Sprintf("%s.resourceRef", path), fmt.Sprintf("%s.resourceSpec", path)) - } - // Check that one of resource ref and resource Spec is present - if (r.ResourceRef == nil || r.ResourceRef.Name == "") && r.ResourceSpec == nil { - return apis.ErrMissingField(fmt.Sprintf("%s.resourceRef", path), fmt.Sprintf("%s.resourceSpec", path)) - } - if r.ResourceSpec != nil && r.ResourceSpec.Validate(ctx) != nil { - return r.ResourceSpec.Validate(ctx) - } - } - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go index 70197bed16..5e0facad2a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package v1beta1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_types.go index d71f513c4b..b4e3764c89 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_types.go @@ -54,7 +54,7 @@ type ResultValue = ParamValue // ResultsType indicates the type of a result; // Used to distinguish between a single string and an array of strings. // Note that there is ResultType used to find out whether a -// PipelineResourceResult is from a task result or not, which is different from +// RunResult is from a task result or not, which is different from // this ResultsType. 
type ResultsType string diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go index adda23e0b1..fe2fca41a6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go @@ -29,10 +29,10 @@ func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { } switch { - // Object are alpha features + // Object results is a beta feature - make sure the feature flag is set to "beta" case tr.Type == ResultsTypeObject: errs := validateObjectResult(tr) - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.AlphaAPIFields)) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.BetaAPIFields)) return errs // Array results is a beta feature - make sure the feature flag is set to "beta" case tr.Type == ResultsTypeArray: diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go index a82835348d..43ad32036f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go @@ -208,19 +208,13 @@ func ParseResultName(resultName string) (string, string) { // in a PipelineTask and returns a list of any references that are found. func PipelineTaskResultRefs(pt *PipelineTask) []*ResultRef { refs := []*ResultRef{} - var matrixParams []Param - if pt.IsMatrixed() { - matrixParams = pt.Matrix.Params - } - for _, p := range append(pt.Params, matrixParams...) { + for _, p := range pt.extractAllParams() { expressions, _ := GetVarSubstitutionExpressionsForParam(p) refs = append(refs, NewResultRefs(expressions)...) } - for _, whenExpression := range pt.WhenExpressions { expressions, _ := whenExpression.GetVarSubstitutionExpressions() refs = append(refs, NewResultRefs(expressions)...) } - return refs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json index ef2a7e5622..c0b8e2770c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json @@ -151,173 +151,6 @@ } } }, - "v1alpha1.PipelineResource": { - "description": "PipelineResource describes a resource that is an input to or output from a Task.", - "type": "object", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "default": {}, - "$ref": "#/definitions/v1.ObjectMeta" - }, - "spec": { - "description": "Spec holds the desired state of the PipelineResource from the client", - "default": {}, - "$ref": "#/definitions/v1alpha1.PipelineResourceSpec" - }, - "status": { - "description": "Status is deprecated. It usually is used to communicate the observed state of the PipelineResource from the controller, but was unused as there is no controller for PipelineResource.", - "$ref": "#/definitions/v1alpha1.PipelineResourceStatus" - } - } - }, - "v1alpha1.PipelineResourceList": { - "description": "PipelineResourceList contains a list of PipelineResources", - "type": "object", - "required": [ - "items" - ], - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "items": { - "type": "array", - "items": { - "default": {}, - "$ref": "#/definitions/v1alpha1.PipelineResource" - } - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "default": {}, - "$ref": "#/definitions/v1.ListMeta" - } - } - }, - "v1alpha1.PipelineResourceSpec": { - "description": "PipelineResourceSpec defines an individual resources used in the pipeline.", - "type": "object", - "required": [ - "type", - "params" - ], - "properties": { - "description": { - "description": "Description is a user-facing description of the resource that may be used to populate a UI.", - "type": "string" - }, - "params": { - "type": "array", - "items": { - "default": {}, - "$ref": "#/definitions/v1alpha1.ResourceParam" - }, - "x-kubernetes-list-type": "atomic" - }, - "secrets": { - "description": "Secrets to fetch to populate some of resource fields", - "type": "array", - "items": { - "default": {}, - "$ref": "#/definitions/v1alpha1.SecretParam" - }, - "x-kubernetes-list-type": "atomic" - }, - "type": { - "type": "string", - "default": "" - } - } - }, - "v1alpha1.PipelineResourceStatus": { - "description": "PipelineResourceStatus does not contain anything because PipelineResources on their own do not have a status Deprecated", - "type": "object" - }, - "v1alpha1.ResourceDeclaration": { - "description": "ResourceDeclaration defines an input or output PipelineResource declared as a requirement by another type such as a Task or Condition. The Name field will be used to refer to these PipelineResources within the type's definition, and when provided as an Input, the Name will be the path to the volume mounted containing this PipelineResource as an input (e.g. 
an input Resource named `workspace` will be mounted at `/workspace`).", - "type": "object", - "required": [ - "name", - "type" - ], - "properties": { - "description": { - "description": "Description is a user-facing description of the declared resource that may be used to populate a UI.", - "type": "string" - }, - "name": { - "description": "Name declares the name by which a resource is referenced in the definition. Resources may be referenced by name in the definition of a Task's steps.", - "type": "string", - "default": "" - }, - "optional": { - "description": "Optional declares the resource as optional. By default optional is set to false which makes a resource required. optional: true - the resource is considered optional optional: false - the resource is considered required (equivalent of not specifying it)", - "type": "boolean" - }, - "targetPath": { - "description": "TargetPath is the path in workspace directory where the resource will be copied.", - "type": "string" - }, - "type": { - "description": "Type is the type of this resource;", - "type": "string", - "default": "" - } - } - }, - "v1alpha1.ResourceParam": { - "description": "ResourceParam declares a string value to use for the parameter called Name, and is used in the specific context of PipelineResources.", - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string", - "default": "" - }, - "value": { - "type": "string", - "default": "" - } - } - }, - "v1alpha1.SecretParam": { - "description": "SecretParam indicates which secret can be used to populate a field of the resource", - "type": "object", - "required": [ - "fieldName", - "secretKey", - "secretName" - ], - "properties": { - "fieldName": { - "type": "string", - "default": "" - }, - "secretKey": { - "type": "string", - "default": "" - }, - "secretName": { - "type": "string", - "default": "" - } - } - }, "v1beta1.ChildStatusReference": { "description": "ChildStatusReference is used to point to the statuses of individual TaskRuns and Runs within this PipelineRun.", "type": "object", @@ -391,7 +224,7 @@ } }, "v1beta1.ClusterTask": { - "description": "ClusterTask is a Task with a cluster scope. ClusterTasks are used to represent Tasks that should be publicly addressable from any namespace in the cluster. Deprecated: Please use the cluster resolver instead.", + "description": "ClusterTask is a Task with a cluster scope. ClusterTasks are used to represent Tasks that should be publicly addressable from any namespace in the cluster.\n\nDeprecated: Please use the cluster resolver instead.", "type": "object", "properties": { "apiVersion": { @@ -442,11 +275,11 @@ } }, "v1beta1.ConfigSource": { - "description": "ConfigSource identifies the source where a resource came from. This can include Git repositories, Task Bundles, file checksums, or other information that allows users to identify where the resource came from and what version was used.", + "description": "ConfigSource contains the information that can uniquely identify where a remote built definition came from i.e. Git repositories, Tekton Bundles in OCI registry and hub.", "type": "object", "properties": { "digest": { - "description": "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. 
Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.digest Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}", + "description": "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}", "type": "object", "additionalProperties": { "type": "string", @@ -454,11 +287,11 @@ } }, "entryPoint": { - "description": "EntryPoint identifies the entry point into the build. This is often a path to a configuration file and/or a target label within that file. Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.entryPoint Example: \"task/git-clone/0.8/git-clone.yaml\"", + "description": "EntryPoint identifies the entry point into the build. This is often a path to a build definition file and/or a target label within that file. Example: \"task/git-clone/0.8/git-clone.yaml\"", "type": "string" }, "uri": { - "description": "URI indicates the identity of the source of the config. Definition: https://slsa.dev/provenance/v0.2#invocation.configSource.uri Example: \"https://github.com/tektoncd/catalog\"", + "description": "URI indicates the identity of the source of the build definition. Example: \"https://github.com/tektoncd/catalog\"", "type": "string" } } @@ -600,6 +433,10 @@ "description": "Description is a user-facing description of the task that may be used to populate a UI.", "type": "string" }, + "displayName": { + "description": "DisplayName is a user-facing name of the task that may be used to populate a UI.", + "type": "string" + }, "kind": { "type": "string" }, @@ -617,7 +454,7 @@ "x-kubernetes-list-type": "atomic" }, "resources": { - "description": "Resources is a list input and output resource to run the task Resources are represented in TaskRuns as bindings to instances of PipelineResources.", + "description": "Resources is a list input and output resource to run the task Resources are represented in TaskRuns as bindings to instances of PipelineResources.\n\nDeprecated: Unused, preserved only for backwards compatibility", "$ref": "#/definitions/v1beta1.TaskResources" }, "results": { @@ -676,8 +513,27 @@ } } }, + "v1beta1.IncludeParams": { + "description": "IncludeParams allows passing in a specific combinations of Parameters into the Matrix.", + "type": "object", + "properties": { + "name": { + "description": "Name the specified combination", + "type": "string" + }, + "params": { + "description": "Params takes only `Parameters` of type `\"string\"` The names of the `params` must match the names of the `params` in the underlying `Task`", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1beta1.Param" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, "v1beta1.InternalTaskModifier": { - "description": "InternalTaskModifier implements TaskModifier for resources that are built-in to Tekton Pipelines.", + "description": "InternalTaskModifier implements TaskModifier for resources that are built-in to Tekton Pipelines.\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "required": [ "stepsToPrepend", @@ -715,6 +571,15 @@ "description": "Matrix is used to fan out Tasks in a Pipeline", "type": "object", "properties": { + "include": { + "description": "Include is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1beta1.IncludeParams" + }, + 
"x-kubernetes-list-type": "atomic" + }, "params": { "description": "Params is a list of parameters used to fan out the pipelineTask Params takes only `Parameters` of type `\"array\"` Each array element is supplied to the `PipelineTask` by substituting `params` of type `\"string\"` in the underlying `Task`. The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting.", "type": "array", @@ -782,13 +647,13 @@ "description": "ResultValue is a type alias of ParamValue", "type": "object", "required": [ - "type", - "stringVal", - "arrayVal", - "objectVal" + "Type", + "StringVal", + "ArrayVal", + "ObjectVal" ], "properties": { - "arrayVal": { + "ArrayVal": { "type": "array", "items": { "type": "string", @@ -796,19 +661,19 @@ }, "x-kubernetes-list-type": "atomic" }, - "objectVal": { + "ObjectVal": { "type": "object", "additionalProperties": { "type": "string", "default": "" } }, - "stringVal": { + "StringVal": { "description": "Represents the stored type of ParamValues.", "type": "string", "default": "" }, - "type": { + "Type": { "type": "string", "default": "" } @@ -838,7 +703,7 @@ } }, "v1beta1.PipelineDeclaredResource": { - "description": "PipelineDeclaredResource is used by a Pipeline to declare the types of the PipelineResources that it will required to run and names which can be used to refer to these PipelineResources in PipelineTaskResourceBindings.", + "description": "PipelineDeclaredResource is used by a Pipeline to declare the types of the PipelineResources that it will required to run and names which can be used to refer to these PipelineResources in PipelineTaskResourceBindings.\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "required": [ "name", @@ -898,7 +763,7 @@ "type": "string" }, "bundle": { - "description": "Bundle url reference to a Tekton Bundle. 
Deprecated: Please use ResolverRef with the bundles resolver instead.", + "description": "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead.", "type": "string" }, "name": { @@ -908,7 +773,7 @@ } }, "v1beta1.PipelineResourceBinding": { - "description": "PipelineResourceBinding connects a reference to an instance of a PipelineResource with a PipelineResource dependency that the Pipeline has declared", + "description": "PipelineResourceBinding connects a reference to an instance of a PipelineResource with a PipelineResource dependency that the Pipeline has declared\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "properties": { "name": { @@ -926,7 +791,7 @@ } }, "v1beta1.PipelineResourceRef": { - "description": "PipelineResourceRef can be used to refer to a specific instance of a Resource", + "description": "PipelineResourceRef can be used to refer to a specific instance of a Resource\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "properties": { "apiVersion": { @@ -939,31 +804,6 @@ } } }, - "v1beta1.PipelineResourceResult": { - "description": "PipelineResourceResult used to export the image name and digest as json", - "type": "object", - "required": [ - "key", - "value" - ], - "properties": { - "key": { - "type": "string", - "default": "" - }, - "resourceName": { - "type": "string" - }, - "type": { - "type": "integer", - "format": "int32" - }, - "value": { - "type": "string", - "default": "" - } - } - }, "v1beta1.PipelineResult": { "description": "PipelineResult used to describe the results of a pipeline", "type": "object", @@ -1111,7 +951,7 @@ "$ref": "#/definitions/pod.Template" }, "resources": { - "description": "Resources is a list of bindings specifying which actual instances of PipelineResources to use for the resources the Pipeline has declared it needs.", + "description": "Resources is a list of bindings specifying which actual instances of PipelineResources to use for the resources the Pipeline has declared it needs.\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "array", "items": { "default": {}, @@ -1136,7 +976,7 @@ "x-kubernetes-list-type": "atomic" }, "timeout": { - "description": "Timeout Deprecated: use pipelineRunSpec.Timeouts.Pipeline instead Time after which the Pipeline times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "description": "Timeout is the Time after which the Pipeline times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration\n\nDeprecated: use pipelineRunSpec.Timeouts.Pipeline instead", "$ref": "#/definitions/v1.Duration" }, "timeouts": { @@ -1215,6 +1055,13 @@ "description": "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).", "$ref": "#/definitions/v1beta1.Provenance" }, + "runs": { + "description": "Runs is a map of PipelineRunRunStatus with the run name as the key\n\nDeprecated: use ChildReferences instead. 
As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/v1beta1.PipelineRunRunStatus" + } + }, "skippedTasks": { "description": "list of tasks that were skipped due to when expressions evaluating to false", "type": "array", @@ -1235,6 +1082,13 @@ "startTime": { "description": "StartTime is the time the PipelineRun is actually started.", "$ref": "#/definitions/v1.Time" + }, + "taskRuns": { + "description": "TaskRuns is a map of PipelineRunTaskRunStatus with the taskRun name as the key.\n\nDeprecated: use ChildReferences instead. As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/v1beta1.PipelineRunTaskRunStatus" + } } } }, @@ -1276,6 +1130,13 @@ "description": "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).", "$ref": "#/definitions/v1beta1.Provenance" }, + "runs": { + "description": "Runs is a map of PipelineRunRunStatus with the run name as the key\n\nDeprecated: use ChildReferences instead. As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/v1beta1.PipelineRunRunStatus" + } + }, "skippedTasks": { "description": "list of tasks that were skipped due to when expressions evaluating to false", "type": "array", @@ -1296,6 +1157,13 @@ "startTime": { "description": "StartTime is the time the PipelineRun is actually started.", "$ref": "#/definitions/v1.Time" + }, + "taskRuns": { + "description": "TaskRuns is a map of PipelineRunTaskRunStatus with the taskRun name as the key.\n\nDeprecated: use ChildReferences instead. As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/v1beta1.PipelineRunTaskRunStatus" + } } } }, @@ -1330,6 +1198,10 @@ "description": "Description is a user-facing description of the pipeline that may be used to populate a UI.", "type": "string" }, + "displayName": { + "description": "DisplayName is a user-facing name of the pipeline that may be used to populate a UI.", + "type": "string" + }, "finally": { "description": "Finally declares the list of Tasks that execute just before leaving the Pipeline i.e. either after all Tasks are finished executing successfully or after a failure which would result in ending the Pipeline", "type": "array", @@ -1349,7 +1221,7 @@ "x-kubernetes-list-type": "atomic" }, "resources": { - "description": "Resources declares the names and types of the resources given to the Pipeline's tasks as inputs and outputs.", + "description": "Deprecated: Unused, preserved only for backwards compatibility", "type": "array", "items": { "default": {}, @@ -1390,6 +1262,14 @@ "description": "PipelineTask defines a task in a Pipeline, passing inputs from both Params and from the output of previous tasks.", "type": "object", "properties": { + "description": { + "description": "Description is the description of this task within the context of a Pipeline. 
This description may be used to populate a UI.", + "type": "string" + }, + "displayName": { + "description": "DisplayName is the display name of this task within the context of a Pipeline. This display name may be used to populate a UI.", + "type": "string" + }, "matrix": { "description": "Matrix declares parameters used to fan out this task.", "$ref": "#/definitions/v1beta1.Matrix" @@ -1408,7 +1288,7 @@ "x-kubernetes-list-type": "atomic" }, "resources": { - "description": "Resources declares the resources given to this task as inputs and outputs.", + "description": "Deprecated: Unused, preserved only for backwards compatibility", "$ref": "#/definitions/v1beta1.PipelineTaskResources" }, "retries": { @@ -1457,7 +1337,7 @@ } }, "v1beta1.PipelineTaskInputResource": { - "description": "PipelineTaskInputResource maps the name of a declared PipelineResource input dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources that should be used. This input may come from a previous task.", + "description": "PipelineTaskInputResource maps the name of a declared PipelineResource input dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources that should be used. This input may come from a previous task.\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "required": [ "name", @@ -1506,7 +1386,7 @@ } }, "v1beta1.PipelineTaskOutputResource": { - "description": "PipelineTaskOutputResource maps the name of a declared PipelineResource output dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources that should be used.", + "description": "PipelineTaskOutputResource maps the name of a declared PipelineResource output dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources that should be used.\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "required": [ "name", @@ -1544,7 +1424,7 @@ } }, "v1beta1.PipelineTaskResources": { - "description": "PipelineTaskResources allows a Pipeline to declare how its DeclaredPipelineResources should be provided to a Task as its inputs and outputs.", + "description": "PipelineTaskResources allows a Pipeline to declare how its DeclaredPipelineResources should be provided to a Task as its inputs and outputs.\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "properties": { "inputs": { @@ -1615,7 +1495,7 @@ } }, "v1beta1.PipelineWorkspaceDeclaration": { - "description": "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", + "description": "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding.\n\nDeprecated: use PipelineWorkspaceDeclaration type instead", "type": "object", "required": [ "name" @@ -1646,16 +1526,42 @@ } }, "v1beta1.Provenance": { - "description": "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.). For now, it only contains the subfield `ConfigSource` that identifies the source where a build config file came from. In future, it can be expanded as needed to include more metadata about the build. 
This field aims to be used to carry minimum amount of the authenticated metadata in *Run status so that Tekton Chains can pick it up and record in the provenance it generates.", + "description": "Provenance contains metadata about resources used in the TaskRun/PipelineRun such as the source from where a remote build definition was fetched. This field aims to carry minimum amoumt of metadata in *Run status so that Tekton Chains can capture them in the provenance.", "type": "object", "properties": { "configSource": { - "description": "ConfigSource identifies the source where a resource came from.", + "description": "Deprecated: Use RefSource instead", "$ref": "#/definitions/v1beta1.ConfigSource" }, "featureFlags": { "description": "FeatureFlags identifies the feature flags that were used during the task/pipeline run", "$ref": "#/definitions/github.com.tektoncd.pipeline.pkg.apis.config.FeatureFlags" + }, + "refSource": { + "description": "RefSource identifies the source where a remote task/pipeline came from.", + "$ref": "#/definitions/v1beta1.RefSource" + } + } + }, + "v1beta1.RefSource": { + "description": "RefSource contains the information that can uniquely identify where a remote built definition came from i.e. Git repositories, Tekton Bundles in OCI registry and hub.", + "type": "object", + "properties": { + "digest": { + "description": "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "entryPoint": { + "description": "EntryPoint identifies the entry point into the build. This is often a path to a build definition file and/or a target label within that file. Example: \"task/git-clone/0.8/git-clone.yaml\"", + "type": "string" + }, + "uri": { + "description": "URI indicates the identity of the source of the build definition. Example: \"https://github.com/tektoncd/catalog\"", + "type": "string" } } }, @@ -1735,7 +1641,8 @@ "type": "object", "required": [ "data", - "source" + "source", + "refSource" ], "properties": { "annotations": { @@ -1766,8 +1673,12 @@ "type": "integer", "format": "int64" }, + "refSource": { + "description": "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", + "$ref": "#/definitions/v1beta1.RefSource" + }, "source": { - "description": "Source is the source reference of the remote data that records the url, digest and the entrypoint.", + "description": "Deprecated: Use RefSource instead", "$ref": "#/definitions/v1beta1.ConfigSource" } } @@ -1777,7 +1688,8 @@ "type": "object", "required": [ "data", - "source" + "source", + "refSource" ], "properties": { "data": { @@ -1785,8 +1697,12 @@ "type": "string", "default": "" }, + "refSource": { + "description": "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", + "$ref": "#/definitions/v1beta1.RefSource" + }, "source": { - "description": "Source is the source reference of the remote data that records the url, digest and the entrypoint.", + "description": "Deprecated: Use RefSource instead", "$ref": "#/definitions/v1beta1.ConfigSource" } } @@ -2108,11 +2024,11 @@ "type": "string" }, "lifecycle": { - "description": "Deprecated. This field will be removed in a future release. Actions that the management system should take in response to container lifecycle events. 
Cannot be updated.", + "description": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.", "$ref": "#/definitions/v1.Lifecycle" }, "livenessProbe": { - "description": "Deprecated. This field will be removed in a future release. Periodic probe of container liveness. Step will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "description": "Periodic probe of container liveness. Step will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", "$ref": "#/definitions/v1.Probe" }, "name": { @@ -2125,7 +2041,7 @@ "type": "string" }, "ports": { - "description": "Deprecated. This field will be removed in a future release. List of ports to expose from the Step's container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "description": "List of ports to expose from the Step's container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.", "type": "array", "items": { "default": {}, @@ -2140,7 +2056,7 @@ "x-kubernetes-patch-strategy": "merge" }, "readinessProbe": { - "description": "Deprecated. This field will be removed in a future release. Periodic probe of container service readiness. Step will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "description": "Periodic probe of container service readiness. Step will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", "$ref": "#/definitions/v1.Probe" }, "resources": { @@ -2157,7 +2073,7 @@ "$ref": "#/definitions/v1.SecurityContext" }, "startupProbe": { - "description": "Deprecated. This field will be removed in a future release. DeprecatedStartupProbe indicates that the Pod this Step runs in has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "description": "DeprecatedStartupProbe indicates that the Pod this Step runs in has successfully initialized. 
If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", "$ref": "#/definitions/v1.Probe" }, "stderrConfig": { @@ -2165,11 +2081,11 @@ "$ref": "#/definitions/v1beta1.StepOutputConfig" }, "stdin": { - "description": "Deprecated. This field will be removed in a future release. Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.\n\nDeprecated: This field will be removed in a future release.", "type": "boolean" }, "stdinOnce": { - "description": "Deprecated. This field will be removed in a future release. Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false\n\nDeprecated: This field will be removed in a future release.", "type": "boolean" }, "stdoutConfig": { @@ -2177,11 +2093,11 @@ "$ref": "#/definitions/v1beta1.StepOutputConfig" }, "terminationMessagePath": { - "description": "Deprecated. This field will be removed in a future release and can't be meaningfully used.", + "description": "Deprecated: This field will be removed in a future release and can't be meaningfully used.", "type": "string" }, "terminationMessagePolicy": { - "description": "Deprecated. This field will be removed in a future release and can't be meaningfully used.", + "description": "Deprecated: This field will be removed in a future release and can't be meaningfully used.", "type": "string" }, "timeout": { @@ -2189,7 +2105,7 @@ "$ref": "#/definitions/v1.Duration" }, "tty": { - "description": "Deprecated. This field will be removed in a future release. Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. 
Default is false.", + "description": "Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. Default is false.\n\nDeprecated: This field will be removed in a future release.", "type": "boolean" }, "volumeDevices": { @@ -2320,20 +2236,20 @@ "type": "string" }, "lifecycle": { - "description": "Deprecated. This field will be removed in a future release. Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "description": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.", "$ref": "#/definitions/v1.Lifecycle" }, "livenessProbe": { - "description": "Deprecated. This field will be removed in a future release. Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "description": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", "$ref": "#/definitions/v1.Probe" }, "name": { - "description": "Deprecated. This field will be removed in a future release. Default name for each Step specified as a DNS_LABEL. Each Step in a Task must have a unique name. Cannot be updated.", + "description": "Default name for each Step specified as a DNS_LABEL. Each Step in a Task must have a unique name. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.", "type": "string", "default": "" }, "ports": { - "description": "Deprecated. This field will be removed in a future release. List of ports to expose from the Step's container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "description": "List of ports to expose from the Step's container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.", "type": "array", "items": { "default": {}, @@ -2348,7 +2264,7 @@ "x-kubernetes-patch-strategy": "merge" }, "readinessProbe": { - "description": "Deprecated. This field will be removed in a future release. Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", "$ref": "#/definitions/v1.Probe" }, "resources": { @@ -2361,27 +2277,27 @@ "$ref": "#/definitions/v1.SecurityContext" }, "startupProbe": { - "description": "Deprecated. This field will be removed in a future release. DeprecatedStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "description": "DeprecatedStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.", "$ref": "#/definitions/v1.Probe" }, "stdin": { - "description": "Deprecated. This field will be removed in a future release. Whether this Step should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the Step will always result in EOF. Default is false.", + "description": "Whether this Step should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the Step will always result in EOF. Default is false.\n\nDeprecated: This field will be removed in a future release.", "type": "boolean" }, "stdinOnce": { - "description": "Deprecated. This field will be removed in a future release. Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false\n\nDeprecated: This field will be removed in a future release.", "type": "boolean" }, "terminationMessagePath": { - "description": "Deprecated. 
This field will be removed in a future release and cannot be meaningfully used.", + "description": "Deprecated: This field will be removed in a future release and cannot be meaningfully used.", "type": "string" }, "terminationMessagePolicy": { - "description": "Deprecated. This field will be removed in a future release and cannot be meaningfully used.", + "description": "Deprecated: This field will be removed in a future release and cannot be meaningfully used.", "type": "string" }, "tty": { - "description": "Deprecated. This field will be removed in a future release. Whether this Step should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. Default is false.", + "description": "Whether this Step should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. Default is false.\n\nDeprecated: This field will be removed in a future release.", "type": "boolean" }, "volumeDevices": { @@ -2468,15 +2384,15 @@ "type": "object", "properties": { "apiVersion": { - "description": "API version of the referent", + "description": "API version of the referent Note: A Task with non-empty APIVersion and Kind is considered a Custom Task", "type": "string" }, "bundle": { - "description": "Bundle url reference to a Tekton Bundle. Deprecated: Please use ResolverRef with the bundles resolver instead.", + "description": "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead.", "type": "string" }, "kind": { - "description": "TaskKind indicates the kind of the task, namespaced or cluster scoped.", + "description": "TaskKind indicates the Kind of the Task: 1. Namespaced Task when Kind is set to \"Task\". If Kind is \"\", it defaults to \"Task\". 2. Cluster-Scoped Task when Kind is set to \"ClusterTask\" 3. Custom Task when Kind is non-empty and APIVersion is non-empty", "type": "string" }, "name": { @@ -2486,7 +2402,7 @@ } }, "v1beta1.TaskResource": { - "description": "TaskResource defines an input or output Resource declared as a requirement by a Task. The Name field will be used to refer to these Resources within the Task definition, and when provided as an Input, the Name will be the path to the volume mounted containing this Resource as an input (e.g. an input Resource named `workspace` will be mounted at `/workspace`).", + "description": "TaskResource defines an input or output Resource declared as a requirement by a Task. The Name field will be used to refer to these Resources within the Task definition, and when provided as an Input, the Name will be the path to the volume mounted containing this Resource as an input (e.g. 
an input Resource named `workspace` will be mounted at `/workspace`).\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "required": [ "name", @@ -2518,7 +2434,7 @@ } }, "v1beta1.TaskResourceBinding": { - "description": "TaskResourceBinding points to the PipelineResource that will be used for the Task input or output called Name.", + "description": "TaskResourceBinding points to the PipelineResource that will be used for the Task input or output called Name.\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "properties": { "name": { @@ -2545,7 +2461,7 @@ } }, "v1beta1.TaskResources": { - "description": "TaskResources allows a Pipeline to declare how its DeclaredPipelineResources should be provided to a Task as its inputs and outputs.", + "description": "TaskResources allows a Pipeline to declare how its DeclaredPipelineResources should be provided to a Task as its inputs and outputs.\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "properties": { "inputs": { @@ -2639,7 +2555,7 @@ } }, "v1beta1.TaskRunInputs": { - "description": "TaskRunInputs holds the input values that this task was invoked with.", + "description": "TaskRunInputs holds the input values that this task was invoked with.\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "properties": { "params": { @@ -2689,7 +2605,7 @@ } }, "v1beta1.TaskRunOutputs": { - "description": "TaskRunOutputs holds the output values that this task was invoked with.", + "description": "TaskRunOutputs holds the output values that this task was invoked with.\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "properties": { "resources": { @@ -2703,7 +2619,7 @@ } }, "v1beta1.TaskRunResources": { - "description": "TaskRunResources allows a TaskRun to declare inputs and outputs TaskResourceBinding", + "description": "TaskRunResources allows a TaskRun to declare inputs and outputs TaskResourceBinding\n\nDeprecated: Unused, preserved only for backwards compatibility", "type": "object", "properties": { "inputs": { @@ -2794,6 +2710,7 @@ "$ref": "#/definitions/pod.Template" }, "resources": { + "description": "Deprecated: Unused, preserved only for backwards compatibility", "$ref": "#/definitions/v1beta1.TaskRunResources" }, "retries": { @@ -2869,7 +2786,7 @@ } }, "cloudEvents": { - "description": "Deprecated. CloudEvents describe the state of each cloud event requested via a CloudEventResource.", + "description": "CloudEvents describe the state of each cloud event requested via a CloudEventResource.\n\nDeprecated: Removed in v0.44.0.", "type": "array", "items": { "default": {}, @@ -2906,11 +2823,11 @@ "$ref": "#/definitions/v1beta1.Provenance" }, "resourcesResult": { - "description": "Results from Resources built during the TaskRun. currently includes the digest of build container images", + "description": "Results from Resources built during the TaskRun. This is tomb-stoned along with the removal of pipelineResources Deprecated: this field is not populated and is preserved only for backwards compatibility", "type": "array", "items": { "default": {}, - "$ref": "#/definitions/v1beta1.PipelineResourceResult" + "$ref": "#/definitions/github.com.tektoncd.pipeline.pkg.result.RunResult" }, "x-kubernetes-list-type": "atomic" }, @@ -2976,7 +2893,7 @@ ], "properties": { "cloudEvents": { - "description": "Deprecated. 
CloudEvents describe the state of each cloud event requested via a CloudEventResource.", + "description": "CloudEvents describe the state of each cloud event requested via a CloudEventResource.\n\nDeprecated: Removed in v0.44.0.", "type": "array", "items": { "default": {}, @@ -2998,11 +2915,11 @@ "$ref": "#/definitions/v1beta1.Provenance" }, "resourcesResult": { - "description": "Results from Resources built during the TaskRun. currently includes the digest of build container images", + "description": "Results from Resources built during the TaskRun. This is tomb-stoned along with the removal of pipelineResources Deprecated: this field is not populated and is preserved only for backwards compatibility", "type": "array", "items": { "default": {}, - "$ref": "#/definitions/v1beta1.PipelineResourceResult" + "$ref": "#/definitions/github.com.tektoncd.pipeline.pkg.result.RunResult" }, "x-kubernetes-list-type": "atomic" }, @@ -3088,6 +3005,10 @@ "description": "Description is a user-facing description of the task that may be used to populate a UI.", "type": "string" }, + "displayName": { + "description": "DisplayName is a user-facing name of the task that may be used to populate a UI.", + "type": "string" + }, "params": { "description": "Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value.", "type": "array", @@ -3098,7 +3019,7 @@ "x-kubernetes-list-type": "atomic" }, "resources": { - "description": "Resources is a list input and output resource to run the task Resources are represented in TaskRuns as bindings to instances of PipelineResources.", + "description": "Resources is a list input and output resource to run the task Resources are represented in TaskRuns as bindings to instances of PipelineResources.\n\nDeprecated: Unused, preserved only for backwards compatibility", "$ref": "#/definitions/v1beta1.TaskResources" }, "results": { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go index ee0e8bd7b1..19bbec3fde 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go @@ -21,13 +21,9 @@ import ( "fmt" v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" - "github.com/tektoncd/pipeline/pkg/apis/version" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" ) -const resourcesAnnotationKey = "tekton.dev/v1beta1Resources" - var _ apis.Convertible = (*Task)(nil) // ConvertTo implements apis.Convertible @@ -38,9 +34,6 @@ func (t *Task) ConvertTo(ctx context.Context, to apis.Convertible) error { switch sink := to.(type) { case *v1.Task: sink.ObjectMeta = t.ObjectMeta - if err := serializeResources(&sink.ObjectMeta, &t.Spec); err != nil { - return err - } return t.Spec.ConvertTo(ctx, &sink.Spec) default: return fmt.Errorf("unknown version, got: %T", sink) @@ -85,6 +78,7 @@ func (ts *TaskSpec) ConvertTo(ctx context.Context, sink *v1.TaskSpec) error { p.convertTo(ctx, &new) sink.Params = append(sink.Params, new) } + sink.DisplayName = ts.DisplayName sink.Description = ts.Description return nil } @@ -97,9 +91,6 @@ func (t *Task) ConvertFrom(ctx context.Context, from apis.Convertible) error { switch source := from.(type) { case *v1.Task: t.ObjectMeta = source.ObjectMeta - if err := deserializeResources(&t.ObjectMeta, &t.Spec); err != nil { - return err 
- } return t.Spec.ConvertFrom(ctx, &source.Spec) default: return fmt.Errorf("unknown version, got: %T", t) @@ -144,25 +135,7 @@ func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1.TaskSpec) error new.convertFrom(ctx, p) ts.Params = append(ts.Params, new) } + ts.DisplayName = source.DisplayName ts.Description = source.Description return nil } - -func serializeResources(meta *metav1.ObjectMeta, spec *TaskSpec) error { - if spec.Resources == nil { - return nil - } - return version.SerializeToMetadata(meta, spec.Resources, resourcesAnnotationKey) -} - -func deserializeResources(meta *metav1.ObjectMeta, spec *TaskSpec) error { - resources := &TaskResources{} - err := version.DeserializeFromMetadata(meta, resources, resourcesAnnotationKey) - if err != nil { - return err - } - if resources.Inputs != nil || resources.Outputs != nil { - spec.Resources = resources - } - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go index 957b0aef69..850929d015 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go @@ -24,17 +24,6 @@ import ( "knative.dev/pkg/kmeta" ) -const ( - // TaskRunResultType default task run result value - TaskRunResultType ResultType = 1 - // PipelineResourceResultType default pipeline result value - PipelineResourceResultType = 2 - // InternalTektonResultType default internal tekton result value - InternalTektonResultType = 3 - // UnknownResultType default unknown result type value - UnknownResultType = 10 -) - // +genclient // +genclient:noStatus // +genreconciler:krshapedlogic=false @@ -83,6 +72,8 @@ type TaskSpec struct { // Resources is a list input and output resource to run the task // Resources are represented in TaskRuns as bindings to instances of // PipelineResources. + // + // Deprecated: Unused, preserved only for backwards compatibility // +optional Resources *TaskResources `json:"resources,omitempty"` @@ -91,7 +82,12 @@ type TaskSpec struct { // value. // +optional // +listType=atomic - Params []ParamSpec `json:"params,omitempty"` + Params ParamSpecs `json:"params,omitempty"` + + // DisplayName is a user-facing name of the task that may be + // used to populate a UI. + // +optional + DisplayName string `json:"displayName,omitempty"` // Description is a user-facing description of the task that may be // used to populate a UI. 
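The task_types.go hunk above adds a DisplayName field to TaskSpec and retypes Params as the ParamSpecs alias. The following is a minimal, hand-written Go sketch (not part of the patch; the task name, parameter, and step contents are invented) of a v1beta1 TaskSpec that uses both additions:

package main

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)

func main() {
	// Hypothetical task spec; only DisplayName and the ParamSpecs type are the
	// point here, everything else is placeholder content.
	ts := v1beta1.TaskSpec{
		DisplayName: "Echo Greeting", // new user-facing display name added in this bump
		Description: "Prints a greeting supplied as a parameter.",
		Params: v1beta1.ParamSpecs{{ // Params is now the ParamSpecs slice alias
			Name: "greeting",
			Type: v1beta1.ParamTypeString,
		}},
		Steps: []v1beta1.Step{{
			Name:   "echo",
			Image:  "alpine",
			Script: "echo $(params.greeting)",
		}},
	}
	fmt.Println(ts.DisplayName, len(ts.Params))
}
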
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go index 24447097a0..e76f342121 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go @@ -94,13 +94,14 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(validateSteps(ctx, mergedSteps).ViaField("steps")) errs = errs.Also(validateSidecarNames(ts.Sidecars)) - errs = errs.Also(ts.Resources.Validate(ctx).ViaField("resources")) errs = errs.Also(ValidateParameterTypes(ctx, ts.Params).ViaField("params")) errs = errs.Also(ValidateParameterVariables(ctx, ts.Steps, ts.Params)) - errs = errs.Also(ValidateResourcesVariables(ctx, ts.Steps, ts.Resources)) errs = errs.Also(validateTaskContextVariables(ctx, ts.Steps)) errs = errs.Also(validateTaskResultsVariables(ctx, ts.Steps, ts.Results)) errs = errs.Also(validateResults(ctx, ts.Results).ViaField("results")) + if ts.Resources != nil { + errs = errs.Also(apis.ErrDisallowedFields("resources")) + } return errs } @@ -297,9 +298,9 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) { for _, p := range params { if p.Type == ParamTypeObject { - // Object type parameter is an alpha feature and will fail validation if it's used in a task spec - // when the enable-api-fields feature gate is not "alpha". - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + // Object type parameter is a beta feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha" or "beta". 
+ errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) } errs = errs.Also(p.ValidateType(ctx)) } @@ -383,6 +384,8 @@ func ValidateParameterVariables(ctx context.Context, steps []Step, params []Para arrayParameterNames.Insert(p.Name) case ParamTypeObject: objectParamSpecs = append(objectParamSpecs, p) + case ParamTypeString: + fallthrough default: stringParameterNames.Insert(p.Name) } @@ -421,25 +424,6 @@ func validateTaskResultsVariables(ctx context.Context, steps []Step, results []T return errs } -// ValidateResourcesVariables validates all variables within a TaskResources against a slice of Steps -func ValidateResourcesVariables(ctx context.Context, steps []Step, resources *TaskResources) *apis.FieldError { - if resources == nil { - return nil - } - resourceNames := sets.NewString() - if resources.Inputs != nil { - for _, r := range resources.Inputs { - resourceNames.Insert(r.Name) - } - } - if resources.Outputs != nil { - for _, r := range resources.Outputs { - resourceNames.Insert(r.Name) - } - } - return validateVariables(ctx, steps, "resources.(?:inputs|outputs)", resourceNames) -} - // validateObjectUsage validates the usage of individual attributes of an object param and the usage of the entire object func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) (errs *apis.FieldError) { objectParameterNames := sets.NewString() @@ -621,3 +605,39 @@ func validateTaskArraysIsolated(value, prefix string, arrayNames sets.String) *a func isParamRefs(s string) bool { return strings.HasPrefix(s, "$("+ParamsPrefix) } + +// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. +// error is returned when the array indexing reference is out of bound of the array param +// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. +// - `trParams` are params from taskrun. +// - `taskSpec` contains params declarations. +func (ts *TaskSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { + cfg := config.FromContextOrDefaults(ctx) + if cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { + return nil + } + + // Collect all array params lengths + arrayParamsLengths := ts.Params.extractParamArrayLengths() + for k, v := range params.extractParamArrayLengths() { + arrayParamsLengths[k] = v + } + + // collect all the possible places to use param references + paramsRefs := []string{} + paramsRefs = append(paramsRefs, extractParamRefsFromSteps(ts.Steps)...) + paramsRefs = append(paramsRefs, extractParamRefsFromStepTemplate(ts.StepTemplate)...) + paramsRefs = append(paramsRefs, extractParamRefsFromVolumes(ts.Volumes)...) + for _, v := range ts.Workspaces { + paramsRefs = append(paramsRefs, v.MountPath) + } + paramsRefs = append(paramsRefs, extractParamRefsFromSidecars(ts.Sidecars)...) + + // extract all array indexing references, for example []{"$(params.array-params[1])"} + arrayIndexParamRefs := []string{} + for _, p := range paramsRefs { + arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) 
+ } + + return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go index 2816dc2437..e8e695194b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package v1beta1 import ( @@ -34,7 +50,7 @@ func (tr TaskRef) convertBundleToResolver(sink *v1.TaskRef) { if tr.Bundle != "" { sink.ResolverRef = v1.ResolverRef{ Resolver: "bundles", - Params: []v1.Param{{ + Params: v1.Params{{ Name: "bundle", Value: v1.ParamValue{StringVal: tr.Bundle, Type: v1.ParamTypeString}, }, { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_types.go index 49f9ff66ea..f8f231cd96 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_types.go @@ -20,12 +20,17 @@ package v1beta1 type TaskRef struct { // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name,omitempty"` - // TaskKind indicates the kind of the task, namespaced or cluster scoped. + // TaskKind indicates the Kind of the Task: + // 1. Namespaced Task when Kind is set to "Task". If Kind is "", it defaults to "Task". + // 2. Cluster-Scoped Task when Kind is set to "ClusterTask" + // 3. Custom Task when Kind is non-empty and APIVersion is non-empty Kind TaskKind `json:"kind,omitempty"` // API version of the referent + // Note: A Task with non-empty APIVersion and Kind is considered a Custom Task // +optional APIVersion string `json:"apiVersion,omitempty"` // Bundle url reference to a Tekton Bundle. + // // Deprecated: Please use ResolverRef with the bundles resolver instead. // +optional Bundle string `json:"bundle,omitempty"` @@ -48,3 +53,10 @@ const ( // ClusterTaskKind indicates that task type has a cluster scope. 
ClusterTaskKind TaskKind = "ClusterTask" ) + +// IsCustomTask checks whether the reference is to a Custom Task +func (tr *TaskRef) IsCustomTask() bool { + // Note that if `apiVersion` is set to `"tekton.dev/v1beta1"` and `kind` is set to `"Task"`, + // the reference will be considered a Custom Task - https://github.com/tektoncd/pipeline/issues/6457 + return tr != nil && tr.APIVersion != "" && tr.Kind != "" +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go index 971a78498b..0297139922 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go @@ -62,5 +62,5 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { } } } - return + return //nolint:nakedret } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go index bb081314b4..b2745ec5c7 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go @@ -41,15 +41,9 @@ func (tr *TaskRun) ConvertTo(ctx context.Context, to apis.Convertible) error { switch sink := to.(type) { case *v1.TaskRun: sink.ObjectMeta = tr.ObjectMeta - if err := serializeTaskRunResources(&sink.ObjectMeta, &tr.Spec); err != nil { - return err - } if err := serializeTaskRunCloudEvents(&sink.ObjectMeta, &tr.Status); err != nil { return err } - if err := serializeTaskRunResourcesResult(&sink.ObjectMeta, &tr.Status); err != nil { - return err - } if err := tr.Status.ConvertTo(ctx, &sink.Status); err != nil { return err } @@ -118,15 +112,9 @@ func (tr *TaskRun) ConvertFrom(ctx context.Context, from apis.Convertible) error switch source := from.(type) { case *v1.TaskRun: tr.ObjectMeta = source.ObjectMeta - if err := deserializeTaskRunResources(&tr.ObjectMeta, &tr.Spec); err != nil { - return err - } if err := deserializeTaskRunCloudEvents(&tr.ObjectMeta, &tr.Status); err != nil { return err } - if err := deserializeTaskRunResourcesResult(&tr.ObjectMeta, &tr.Status); err != nil { - return err - } if err := tr.Status.ConvertFrom(ctx, source.Status); err != nil { return err } @@ -360,25 +348,6 @@ func (ss *SidecarState) convertFrom(ctx context.Context, source v1.SidecarState) ss.ImageID = source.ImageID } -func serializeTaskRunResources(meta *metav1.ObjectMeta, spec *TaskRunSpec) error { - if spec.Resources == nil { - return nil - } - return version.SerializeToMetadata(meta, spec.Resources, resourcesAnnotationKey) -} - -func deserializeTaskRunResources(meta *metav1.ObjectMeta, spec *TaskRunSpec) error { - resources := &TaskRunResources{} - err := version.DeserializeFromMetadata(meta, resources, resourcesAnnotationKey) - if err != nil { - return err - } - if resources.Inputs != nil || resources.Outputs != nil { - spec.Resources = resources - } - return nil -} - func serializeTaskRunCloudEvents(meta *metav1.ObjectMeta, status *TaskRunStatus) error { if status.CloudEvents == nil { return nil @@ -397,22 +366,3 @@ func deserializeTaskRunCloudEvents(meta *metav1.ObjectMeta, status *TaskRunStatu } return nil } - -func serializeTaskRunResourcesResult(meta *metav1.ObjectMeta, status *TaskRunStatus) error { - if status.ResourcesResult == nil { - return 
nil - } - return version.SerializeToMetadata(meta, status.ResourcesResult, resourcesResultAnnotationKey) -} - -func deserializeTaskRunResourcesResult(meta *metav1.ObjectMeta, status *TaskRunStatus) error { - resourcesResult := []PipelineResourceResult{} - err := version.DeserializeFromMetadata(meta, &resourcesResult, resourcesResultAnnotationKey) - if err != nil { - return err - } - if len(resourcesResult) != 0 { - status.ResourcesResult = resourcesResult - } - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go index feecece526..61f3285dec 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go @@ -50,8 +50,13 @@ func (tr *TaskRun) SetDefaults(ctx context.Context) { // SetDefaults implements apis.Defaultable func (trs *TaskRunSpec) SetDefaults(ctx context.Context) { cfg := config.FromContextOrDefaults(ctx) - if trs.TaskRef != nil && trs.TaskRef.Kind == "" { - trs.TaskRef.Kind = NamespacedTaskKind + if trs.TaskRef != nil { + if trs.TaskRef.Kind == "" { + trs.TaskRef.Kind = NamespacedTaskKind + } + if trs.TaskRef.Name == "" && trs.TaskRef.Resolver == "" { + trs.TaskRef.Resolver = ResolverName(cfg.Defaults.DefaultResolverType) + } } if trs.Timeout == nil { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go index 9cf16f6f96..27bb514322 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go @@ -40,7 +40,8 @@ type TaskRunSpec struct { Debug *TaskRunDebug `json:"debug,omitempty"` // +optional // +listType=atomic - Params []Param `json:"params,omitempty"` + Params Params `json:"params,omitempty"` + // Deprecated: Unused, preserved only for backwards compatibility // +optional Resources *TaskRunResources `json:"resources,omitempty"` // +optional @@ -115,23 +116,6 @@ type TaskRunDebug struct { Breakpoint []string `json:"breakpoint,omitempty"` } -// TaskRunInputs holds the input values that this task was invoked with. -type TaskRunInputs struct { - // +optional - // +listType=atomic - Resources []TaskResourceBinding `json:"resources,omitempty"` - // +optional - // +listType=atomic - Params []Param `json:"params,omitempty"` -} - -// TaskRunOutputs holds the output values that this task was invoked with. -type TaskRunOutputs struct { - // +optional - // +listType=atomic - Resources []TaskResourceBinding `json:"resources,omitempty"` -} - var taskRunCondSet = apis.NewBatchConditionSet() // TaskRunStatus defines the observed state of TaskRun @@ -251,9 +235,11 @@ type TaskRunStatusFields struct { // +listType=atomic Steps []StepState `json:"steps,omitempty"` - // Deprecated. // CloudEvents describe the state of each cloud event requested via a // CloudEventResource. + // + // Deprecated: Removed in v0.44.0. + // // +optional // +listType=atomic CloudEvents []CloudEventDelivery `json:"cloudEvents,omitempty"` @@ -264,8 +250,9 @@ type TaskRunStatusFields struct { // +listType=atomic RetriesStatus []TaskRunStatus `json:"retriesStatus,omitempty"` - // Results from Resources built during the TaskRun. 
currently includes - // the digest of build container images + // Results from Resources built during the TaskRun. + // This is tomb-stoned along with the removal of pipelineResources + // Deprecated: this field is not populated and is preserved only for backwards compatibility // +optional // +listType=atomic ResourcesResult []PipelineResourceResult `json:"resourcesResult,omitempty"` @@ -507,7 +494,7 @@ func (tr *TaskRun) GetTimeout(ctx context.Context) time.Duration { // Use the platform default is no timeout is set if tr.Spec.Timeout == nil { defaultTimeout := time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) - return defaultTimeout * time.Minute + return defaultTimeout * time.Minute //nolint:durationcheck } return tr.Spec.Timeout.Duration } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go index f8d379c6df..ef414612b4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go @@ -72,7 +72,6 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { // Validate propagated parameters errs = errs.Also(ts.validateInlineParameters(ctx)) errs = errs.Also(ValidateWorkspaceBindings(ctx, ts.Workspaces).ViaField("workspaces")) - errs = errs.Also(ts.Resources.Validate(ctx).ViaField("resources")) if ts.Debug != nil { errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "debug", config.AlphaAPIFields).ViaField("debug")) errs = errs.Also(validateDebug(ts.Debug).ViaField("debug")) @@ -110,6 +109,9 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { if ts.PodTemplate != nil { errs = errs.Also(validatePodTemplateEnv(ctx, *ts.PodTemplate)) } + if ts.Resources != nil { + errs = errs.Also(apis.ErrDisallowedFields("resources")) + } return errs } @@ -238,13 +240,13 @@ func ValidateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs } // ValidateParameters makes sure the params for the Task are valid. -func ValidateParameters(ctx context.Context, params []Param) (errs *apis.FieldError) { +func ValidateParameters(ctx context.Context, params Params) (errs *apis.FieldError) { var names []string for _, p := range params { if p.Value.Type == ParamTypeObject { - // Object type parameter is an alpha feature and will fail validation if it's used in a taskrun spec - // when the enable-api-fields feature gate is not "alpha". - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + // Object type parameter is a beta feature and will fail validation if it's used in a taskrun spec + // when the enable-api-fields feature gate is not "alpha" or "beta". 
+ errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) } names = append(names, p.Name) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go index 727e8e6f3a..f7daa5cfb4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package v1beta1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go index f915fe13df..194821afce 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go @@ -87,6 +87,7 @@ type WorkspaceBinding struct { // WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun // is expected to populate with a workspace binding. +// // Deprecated: use PipelineWorkspaceDeclaration type instead type WorkspacePipelineDeclaration = PipelineWorkspaceDeclaration diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go index 356540a8d1..fdd0282578 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go @@ -26,6 +26,7 @@ import ( pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" runv1beta1 "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1" + result "github.com/tektoncd/pipeline/pkg/result" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -152,6 +153,56 @@ func (in *ClusterTaskList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Combination) DeepCopyInto(out *Combination) { + { + in := &in + *out = make(Combination, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Combination. +func (in Combination) DeepCopy() Combination { + if in == nil { + return nil + } + out := new(Combination) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in Combinations) DeepCopyInto(out *Combinations) { + { + in := &in + *out = make(Combinations, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(Combination, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Combinations. +func (in Combinations) DeepCopy() Combinations { + if in == nil { + return nil + } + out := new(Combinations) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigSource) DeepCopyInto(out *ConfigSource) { *out = *in @@ -251,7 +302,7 @@ func (in *CustomRunSpec) DeepCopyInto(out *CustomRunSpec) { } if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -320,6 +371,51 @@ func (in *EmbeddedTask) DeepCopy() *EmbeddedTask { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludeParams) DeepCopyInto(out *IncludeParams) { + *out = *in + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make(Params, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeParams. +func (in *IncludeParams) DeepCopy() *IncludeParams { + if in == nil { + return nil + } + out := new(IncludeParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in IncludeParamsList) DeepCopyInto(out *IncludeParamsList) { + { + in := &in + *out = make(IncludeParamsList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeParamsList. +func (in IncludeParamsList) DeepCopy() IncludeParamsList { + if in == nil { + return nil + } + out := new(IncludeParamsList) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InternalTaskModifier) DeepCopyInto(out *InternalTaskModifier) { *out = *in @@ -362,7 +458,14 @@ func (in *Matrix) DeepCopyInto(out *Matrix) { *out = *in if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make(IncludeParamsList, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -425,6 +528,28 @@ func (in *ParamSpec) DeepCopy() *ParamSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ParamSpecs) DeepCopyInto(out *ParamSpecs) { + { + in := &in + *out = make(ParamSpecs, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamSpecs. 
+func (in ParamSpecs) DeepCopy() ParamSpecs { + if in == nil { + return nil + } + out := new(ParamSpecs) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ParamValue) DeepCopyInto(out *ParamValue) { *out = *in @@ -453,6 +578,28 @@ func (in *ParamValue) DeepCopy() *ParamValue { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Params) DeepCopyInto(out *Params) { + { + in := &in + *out = make(Params, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Params. +func (in Params) DeepCopy() Params { + if in == nil { + return nil + } + out := new(Params) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Pipeline) DeepCopyInto(out *Pipeline) { *out = *in @@ -588,22 +735,6 @@ func (in *PipelineResourceRef) DeepCopy() *PipelineResourceRef { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineResourceResult) DeepCopyInto(out *PipelineResourceResult) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineResourceResult. -func (in *PipelineResourceResult) DeepCopy() *PipelineResourceResult { - if in == nil { - return nil - } - out := new(PipelineResourceResult) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PipelineResult) DeepCopyInto(out *PipelineResult) { *out = *in @@ -749,7 +880,7 @@ func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { } if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -825,6 +956,36 @@ func (in *PipelineRunStatusFields) DeepCopyInto(out *PipelineRunStatusFields) { in, out := &in.CompletionTime, &out.CompletionTime *out = (*in).DeepCopy() } + if in.TaskRuns != nil { + in, out := &in.TaskRuns, &out.TaskRuns + *out = make(map[string]*PipelineRunTaskRunStatus, len(*in)) + for key, val := range *in { + var outVal *PipelineRunTaskRunStatus + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(PipelineRunTaskRunStatus) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + if in.Runs != nil { + in, out := &in.Runs, &out.Runs + *out = make(map[string]*PipelineRunRunStatus, len(*in)) + for key, val := range *in { + var outVal *PipelineRunRunStatus + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(PipelineRunRunStatus) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } if in.PipelineResults != nil { in, out := &in.PipelineResults, &out.PipelineResults *out = make([]PipelineRunResult, len(*in)) @@ -925,7 +1086,7 @@ func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { } if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]ParamSpec, len(*in)) + *out = make(ParamSpecs, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -994,7 +1155,7 @@ func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { } if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1261,6 +1422,11 @@ func (in *Provenance) DeepCopyInto(out *Provenance) { *out = new(ConfigSource) (*in).DeepCopyInto(*out) } + if in.RefSource != nil { + in, out := &in.RefSource, &out.RefSource + *out = new(RefSource) + (*in).DeepCopyInto(*out) + } if in.FeatureFlags != nil { in, out := &in.FeatureFlags, &out.FeatureFlags *out = new(config.FeatureFlags) @@ -1279,12 +1445,35 @@ func (in *Provenance) DeepCopy() *Provenance { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RefSource) DeepCopyInto(out *RefSource) { + *out = *in + if in.Digest != nil { + in, out := &in.Digest, &out.Digest + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RefSource. +func (in *RefSource) DeepCopy() *RefSource { + if in == nil { + return nil + } + out := new(RefSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResolverRef) DeepCopyInto(out *ResolverRef) { *out = *in if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -2039,7 +2228,7 @@ func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { } if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make(Params, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -2160,7 +2349,7 @@ func (in *TaskRunStatusFields) DeepCopyInto(out *TaskRunStatusFields) { } if in.ResourcesResult != nil { in, out := &in.ResourcesResult, &out.ResourcesResult - *out = make([]PipelineResourceResult, len(*in)) + *out = make([]result.RunResult, len(*in)) copy(*out, *in) } if in.TaskRunResults != nil { @@ -2234,7 +2423,7 @@ func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { } if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]ParamSpec, len(*in)) + *out = make(ParamSpecs, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/doc.go index b2c8398ab3..65c3fe6b91 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/doc.go @@ -1,12 +1,9 @@ /* Copyright 2019 The Tekton Authors - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -15,7 +12,6 @@ limitations under the License. */ // Package v1alpha1 contains API Schema definitions for the pipeline v1alpha1 API group -// +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/tektoncd/pipeline/pkg/apis/resource // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipeline_resource_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipeline_resource_defaults.go deleted file mode 100644 index faf03c08c1..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipeline_resource_defaults.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - - "knative.dev/pkg/apis" -) - -var _ apis.Defaultable = (*PipelineResource)(nil) - -// SetDefaults implements api.Defaultable -func (t *PipelineResource) SetDefaults(ctx context.Context) { - t.Spec.SetDefaults(ctx) -} - -// SetDefaults implements api.Defaultable -func (ts *PipelineResourceSpec) SetDefaults(ctx context.Context) { -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipeline_resource_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipeline_resource_types.go index e588feb918..6cde87cb8a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipeline_resource_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipeline_resource_types.go @@ -1,12 +1,10 @@ /* +// Deprecated: Unused, preserved only for backwards compatibility Copyright 2019 The Tekton Authors - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,6 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// The contents of this package are deprecated and unused. Preserved for backwards compatibility. package v1alpha1 import ( @@ -23,33 +22,10 @@ import ( // PipelineResourceType represents the type of endpoint the pipelineResource is, so that the // controller will know this pipelineResource shouldx be fetched and optionally what // additional metatdata should be provided for it. +// +// Deprecated: Unused, preserved only for backwards compatibility type PipelineResourceType = string -var ( - // AllowedOutputResources are the resource types that can be used as outputs - AllowedOutputResources = map[PipelineResourceType]bool{ - PipelineResourceTypeStorage: true, - PipelineResourceTypeGit: true, - } -) - -const ( - // PipelineResourceTypeGit indicates that this source is a GitHub repo. - PipelineResourceTypeGit PipelineResourceType = "git" - - // PipelineResourceTypeStorage indicates that this source is a storage blob resource. - PipelineResourceTypeStorage PipelineResourceType = "storage" - - // PipelineResourceTypeImage indicates that this source is a docker Image. - PipelineResourceTypeImage PipelineResourceType = "image" - - // PipelineResourceTypeGCS is the subtype for the GCSResources, which is backed by a GCS blob/directory. - PipelineResourceTypeGCS PipelineResourceType = "gcs" -) - -// AllResourceTypes can be used for validation to check if a provided Resource type is one of the known types. -var AllResourceTypes = []PipelineResourceType{PipelineResourceTypeGit, PipelineResourceTypeStorage, PipelineResourceTypeImage} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +genclient:noStatus @@ -57,6 +33,7 @@ var AllResourceTypes = []PipelineResourceType{PipelineResourceTypeGit, PipelineR // PipelineResource describes a resource that is an input to or output from a // Task. 
// +// Deprecated: Unused, preserved only for backwards compatibility // +k8s:openapi-gen=true type PipelineResource struct { metav1.TypeMeta `json:",inline"` @@ -66,20 +43,23 @@ type PipelineResource struct { // Spec holds the desired state of the PipelineResource from the client Spec PipelineResourceSpec `json:"spec,omitempty"` - // Status is deprecated. - // It usually is used to communicate the observed state of the PipelineResource from + // Status is used to communicate the observed state of the PipelineResource from // the controller, but was unused as there is no controller for PipelineResource. + // // +optional Status *PipelineResourceStatus `json:"status,omitempty"` } // PipelineResourceStatus does not contain anything because PipelineResources on their own // do not have a status -// Deprecated +// +// Deprecated: Unused, preserved only for backwards compatibility type PipelineResourceStatus struct { } -// PipelineResourceSpec defines an individual resources used in the pipeline. +// PipelineResourceSpec defines an individual resources used in the pipeline. +// +// Deprecated: Unused, preserved only for backwards compatibility type PipelineResourceSpec struct { // Description is a user-facing description of the resource that may be // used to populate a UI. @@ -95,6 +75,8 @@ type PipelineResourceSpec struct { } // SecretParam indicates which secret can be used to populate a field of the resource +// +// Deprecated: Unused, preserved only for backwards compatibility type SecretParam struct { FieldName string `json:"fieldName"` SecretKey string `json:"secretKey"` @@ -103,6 +85,8 @@ type SecretParam struct { // ResourceParam declares a string value to use for the parameter called Name, and is used in // the specific context of PipelineResources. +// +// Deprecated: Unused, preserved only for backwards compatibility type ResourceParam struct { Name string `json:"name"` Value string `json:"value"` @@ -113,6 +97,8 @@ type ResourceParam struct { // PipelineResources within the type's definition, and when provided as an Input, the Name will be the // path to the volume mounted containing this PipelineResource as an input (e.g. // an input Resource named `workspace` will be mounted at `/workspace`). +// +// Deprecated: Unused, preserved only for backwards compatibility type ResourceDeclaration struct { // Name declares the name by which a resource is referenced in the // definition. Resources may be referenced by name in the definition of a @@ -138,6 +124,8 @@ type ResourceDeclaration struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PipelineResourceList contains a list of PipelineResources +// +// Deprecated: Unused, preserved only for backwards compatibility type PipelineResourceList struct { metav1.TypeMeta `json:",inline"` // +optional diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipelineresource_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipelineresource_validation.go deleted file mode 100644 index f4004b05b9..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipelineresource_validation.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "strings" - - "github.com/tektoncd/pipeline/pkg/apis/validate" - "k8s.io/apimachinery/pkg/api/equality" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*PipelineResource)(nil) - -// Validate validates the PipelineResource's ObjectMeta and Spec -func (r *PipelineResource) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(r.GetObjectMeta()); err != nil { - return err.ViaField("metadata") - } - if apis.IsInDelete(ctx) { - return nil - } - return r.Spec.Validate(ctx) -} - -// Validate validates the PipelineResourceSpec based on its type -func (rs *PipelineResourceSpec) Validate(ctx context.Context) *apis.FieldError { - if equality.Semantic.DeepEqual(rs, &PipelineResourceSpec{}) { - return apis.ErrMissingField("spec.type") - } - if rs.Type == PipelineResourceTypeStorage { - foundTypeParam := false - var location string - for _, param := range rs.Params { - switch { - case strings.EqualFold(param.Name, "type"): - if !AllowedStorageType(param.Value) { - return apis.ErrInvalidValue(param.Value, "spec.params.type") - } - foundTypeParam = true - case strings.EqualFold(param.Name, "Location"): - location = param.Value - } - } - - if !foundTypeParam { - return apis.ErrMissingField("spec.params.type") - } - if location == "" { - return apis.ErrMissingField("spec.params.location") - } - } - - for _, allowedType := range AllResourceTypes { - if allowedType == rs.Type { - return nil - } - } - - return apis.ErrInvalidValue("spec.type", rs.Type) -} - -// AllowedStorageType returns true if the provided string can be used as a storage type, and false otherwise -func AllowedStorageType(gotType string) bool { - return gotType == PipelineResourceTypeGCS -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/register.go index 78367093f3..67ccbd27b7 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/register.go @@ -1,12 +1,9 @@ /* Copyright 2019 The Tekton Authors - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go index f32ca9a944..1d509ceadd 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go @@ -28,7 +28,7 @@ import ( func SerializeToMetadata(meta *metav1.ObjectMeta, field interface{}, key string) error { bytes, err := json.Marshal(field) if err != nil { - return fmt.Errorf("error serializing field: %s", err) + return fmt.Errorf("error serializing field: %w", err) } if meta.Annotations == nil { meta.Annotations = make(map[string]string) @@ -46,7 +46,7 @@ func DeserializeFromMetadata(meta *metav1.ObjectMeta, to interface{}, key string } if str, ok := meta.Annotations[key]; ok { if err := json.Unmarshal([]byte(str), to); err != nil { - return fmt.Errorf("error deserializing key %s from metadata: %s", key, err) + return fmt.Errorf("error deserializing key %s from metadata: %w", key, err) } delete(meta.Annotations, key) if len(meta.Annotations) == 0 { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go index 21c3e532df..ffc8f29c1f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go @@ -41,14 +41,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/result/result.go b/vendor/github.com/tektoncd/pipeline/pkg/result/result.go new file mode 100644 index 0000000000..cfcbc3e90a --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/result/result.go @@ -0,0 +1,92 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package result + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/go-multierror" +) + +const ( + // TaskRunResultType default task run result value + TaskRunResultType ResultType = 1 + // reserved: 2 + // was RunResultType + + // InternalTektonResultType default internal tekton result value + InternalTektonResultType = 3 + // UnknownResultType default unknown result type value + UnknownResultType = 10 +) + +// RunResult is used to write key/value pairs to TaskRun pod termination messages. +// The key/value pairs may come from the entrypoint binary, or represent a TaskRunResult. +// If they represent a TaskRunResult, the key is the name of the result and the value is the +// JSON-serialized value of the result. +type RunResult struct { + Key string `json:"key"` + Value string `json:"value"` + // ResourceName may be used in tests, but it is not populated in termination messages. + // It is preserved here for backwards compatibility and will not be ported to v1. + ResourceName string `json:"resourceName,omitempty"` + ResultType ResultType `json:"type,omitempty"` +} + +// ResultType used to find out whether a RunResult is from a task result or not +// Note that ResultsType is another type which is used to define the data type +// (e.g. string, array, etc) we used for Results +// +//nolint:revive // revive complains about stutter of `result.ResultType`. +type ResultType int + +// UnmarshalJSON unmarshals either an int or a string into a ResultType. String +// ResultTypes were removed because they made JSON messages bigger, which in +// turn limited the amount of space in termination messages for task results. String +// support is maintained for backwards compatibility - the Pipelines controller could +// be stopped midway through TaskRun execution, updated with support for int in place +// of string, and then fail the running TaskRun because it doesn't know how to interpret +// the string value that the TaskRun's entrypoint will emit when it completes. 
+func (r *ResultType) UnmarshalJSON(data []byte) error { + var asInt int + var intErr error + + if err := json.Unmarshal(data, &asInt); err != nil { + intErr = err + } else { + *r = ResultType(asInt) + return nil + } + + var asString string + + if err := json.Unmarshal(data, &asString); err != nil { + return fmt.Errorf("unsupported value type, neither int nor string: %w", multierror.Append(intErr, err).ErrorOrNil()) + } + + switch asString { + case "TaskRunResult": + *r = TaskRunResultType + case "InternalTektonResult": + *r = InternalTektonResultType + default: + *r = UnknownResultType + } + + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go index a2611061c0..cb93bc8e7f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go @@ -276,7 +276,7 @@ func extractEntireVariablesFromString(s, prefix string) ([]string, error) { pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution) re, err := regexp.Compile(pattern) if err != nil { - return nil, fmt.Errorf("Fail to parse regex pattern: %v", err) + return nil, fmt.Errorf("Fail to parse regex pattern: %w", err) } matches := re.FindAllStringSubmatch(s, -1) diff --git a/vendor/github.com/tektoncd/pipeline/test/parse/yaml.go b/vendor/github.com/tektoncd/pipeline/test/parse/yaml.go index 8b08d2acbf..3ee9f8349a 100644 --- a/vendor/github.com/tektoncd/pipeline/test/parse/yaml.go +++ b/vendor/github.com/tektoncd/pipeline/test/parse/yaml.go @@ -18,8 +18,6 @@ import ( v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" "k8s.io/apimachinery/pkg/runtime" @@ -47,17 +45,6 @@ kind: TaskRun return &tr } -// MustParseRun takes YAML and parses it into a *v1alpha1.Run -func MustParseRun(t *testing.T, yaml string) *v1alpha1.Run { - t.Helper() - var r v1alpha1.Run - yaml = `apiVersion: tekton.dev/v1alpha1 -kind: Run -` + yaml - mustParseYAML(t, yaml, &r) - return &r -} - // MustParseV1beta1Task takes YAML and parses it into a *v1beta1.Task func MustParseV1beta1Task(t *testing.T, yaml string) *v1beta1.Task { t.Helper() @@ -146,17 +133,6 @@ kind: Pipeline return &pipeline } -// MustParsePipelineResource takes YAML and parses it into a *resourcev1alpha1.PipelineResource -func MustParsePipelineResource(t *testing.T, yaml string) *resourcev1alpha1.PipelineResource { - t.Helper() - var resource resourcev1alpha1.PipelineResource - yaml = `apiVersion: tekton.dev/v1alpha1 -kind: PipelineResource -` + yaml - mustParseYAML(t, yaml, &resource) - return &resource -} - // MustParseVerificationPolicy takes YAML and parses it into a *v1alpha1.VerificationPolicy func MustParseVerificationPolicy(t *testing.T, yaml string) *v1alpha1.VerificationPolicy { t.Helper() diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore index 32e4812755..40d62fa2eb 100644 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -2,3 +2,5 @@ http://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ 
https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 33609fb510..1d9726f60b 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,65 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.14.0/0.37.0/0.0.4] 2023-02-27 + +This release is the last to support [Go 1.18]. +The next release will require at least [Go 1.19]. + +### Added + +- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697) +- Support [Go 1.20]. (#3693) +- The `go.opentelemetry.io/otel/semconv/v1.18.0` package. + The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719) + - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeNameKey` -> `OTelScopeNameKey` + - `OtelScopeVersionKey` -> `OTelScopeVersionKey` + - `OtelLibraryNameKey` -> `OTelLibraryNameKey` + - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` + - `OtelStatusCodeKey` -> `OTelStatusCodeKey` + - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` + - `OtelStatusCodeOk` -> `OTelStatusCodeOk` + - `OtelStatusCodeError` -> `OTelStatusCodeError` + - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeName` -> `OTelScopeName` + - `OtelScopeVersion` -> `OTelScopeVersion` + - `OtelLibraryName` -> `OTelLibraryName` + - `OtelLibraryVersion` -> `OTelLibraryVersion` + - `OtelStatusDescription` -> `OTelStatusDescription` +- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. + See the [README](./bridge/opentracing/README.md) for more information. (#3570) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) +- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) + - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. + - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted. + +### Changed + +- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) +- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. + This change is made to enable memory reuse by SDK users. (#3732) +- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776) + +### Fixed + +- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) +- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) +- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) +- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) +- Data race issue in OTLP exporter retry mechanism. 
(#3755, #3756) +- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) +- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/unit` package is deprecated. + Use the equivalent unit string instead. (#3776) + - Use `"1"` instead of `unit.Dimensionless` + - Use `"By"` instead of `unit.Bytes` + - Use `"ms"` instead of `unit.Milliseconds` + ## [1.13.0/0.36.0] 2023-02-07 ### Added @@ -2243,7 +2302,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.13.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.14.0...HEAD +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 [1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 [1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 [1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 @@ -2303,3 +2363,7 @@ It contains api and sdk for trace and meter. [0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + +[Go 1.20]: https://go.dev/doc/go1.20 +[Go 1.19]: https://go.dev/doc/go1.19 +[Go 1.18]: https://go.dev/doc/go1.18 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 9371a481ab..a6928bfdff 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -508,7 +508,7 @@ Approvers: - [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk - [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Auth0/Okta +- [Damien Mathieu](https://github.com/dmathieu), Elastic Maintainers: diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index befb040a77..0e6ffa284e 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -210,8 +210,9 @@ SEMCONVPKG ?= "semconv/" semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." 
--only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: prerelease diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 1b2ee21fbf..878d87e58b 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -50,14 +50,19 @@ Currently, this project supports the following environments. | OS | Go Version | Architecture | | ------- | ---------- | ------------ | +| Ubuntu | 1.20 | amd64 | | Ubuntu | 1.19 | amd64 | | Ubuntu | 1.18 | amd64 | +| Ubuntu | 1.20 | 386 | | Ubuntu | 1.19 | 386 | | Ubuntu | 1.18 | 386 | +| MacOS | 1.20 | amd64 | | MacOS | 1.19 | amd64 | | MacOS | 1.18 | amd64 | +| Windows | 1.20 | amd64 | | Windows | 1.19 | amd64 | | Windows | 1.18 | amd64 | +| Windows | 1.20 | 386 | | Windows | 1.19 | 386 | | Windows | 1.18 | 386 | diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 34a4e548dd..cb21dd5c09 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -68,7 +68,7 @@ func BoolValue(v bool) Value { // BoolSliceValue creates a BOOLSLICE Value. func BoolSliceValue(v []bool) Value { - return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)} + return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} } // IntValue creates an INT64 Value. @@ -99,7 +99,7 @@ func Int64Value(v int64) Value { // Int64SliceValue creates an INT64SLICE Value. func Int64SliceValue(v []int64) Value { - return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)} + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} } // Float64Value creates a FLOAT64 Value. @@ -112,7 +112,7 @@ func Float64Value(v float64) Value { // Float64SliceValue creates a FLOAT64SLICE Value. func Float64SliceValue(v []float64) Value { - return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)} + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} } // StringValue creates a STRING Value. @@ -125,7 +125,7 @@ func StringValue(v string) Value { // StringSliceValue creates a STRINGSLICE Value. func StringSliceValue(v []string) Value { - return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)} + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} } // Type returns a type of the Value. @@ -149,7 +149,7 @@ func (v Value) AsBoolSlice() []bool { } func (v Value) asBoolSlice() []bool { - return attribute.AsSlice[bool](v.slice) + return attribute.AsBoolSlice(v.slice) } // AsInt64 returns the int64 value. Make sure that the Value's type is @@ -168,7 +168,7 @@ func (v Value) AsInt64Slice() []int64 { } func (v Value) asInt64Slice() []int64 { - return attribute.AsSlice[int64](v.slice) + return attribute.AsInt64Slice(v.slice) } // AsFloat64 returns the float64 value. Make sure that the Value's @@ -187,7 +187,7 @@ func (v Value) AsFloat64Slice() []float64 { } func (v Value) asFloat64Slice() []float64 { - return attribute.AsSlice[float64](v.slice) + return attribute.AsFloat64Slice(v.slice) } // AsString returns the string value. 
Make sure that the Value's type @@ -206,7 +206,7 @@ func (v Value) AsStringSlice() []string { } func (v Value) asStringSlice() []string { - return attribute.AsSlice[string](v.slice) + return attribute.AsStringSlice(v.slice) } type unknownValueType struct{} diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 2203489447..622c3ee3f2 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -22,24 +22,90 @@ import ( "reflect" ) -// SliceValue convert a slice into an array with same elements as slice. -func SliceValue[T bool | int64 | float64 | string](v []T) any { - var zero T +// BoolSliceValue converts a bool slice into an array with same elements as slice. +func BoolSliceValue(v []bool) interface{} { + var zero bool cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]T), v) + copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) return cp.Elem().Interface() } -// AsSlice convert an array into a slice into with same elements as array. -func AsSlice[T bool | int64 | float64 | string](v any) []T { +// Int64SliceValue converts an int64 slice into an array with same elements as slice. +func Int64SliceValue(v []int64) interface{} { + var zero int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) + return cp.Elem().Interface() +} + +// Float64SliceValue converts a float64 slice into an array with same elements as slice. +func Float64SliceValue(v []float64) interface{} { + var zero float64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) + return cp.Elem().Interface() +} + +// StringSliceValue converts a string slice into an array with same elements as slice. +func StringSliceValue(v []string) interface{} { + var zero string + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) + return cp.Elem().Interface() +} + +// AsBoolSlice converts a bool array into a slice into with same elements as array. +func AsBoolSlice(v interface{}) []bool { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero bool + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]bool) +} + +// AsInt64Slice converts an int64 array into a slice into with same elements as array. +func AsInt64Slice(v interface{}) []int64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero int64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]int64) +} + +// AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
+func AsFloat64Slice(v interface{}) []float64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero float64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]float64) +} + +// AsStringSlice converts a string array into a slice into with same elements as array. +func AsStringSlice(v interface{}) []string { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil } - var zero T + var zero string correctLen := rv.Len() correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) cpy := reflect.New(correctType) _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]T) + return cpy.Elem().Slice(0, correctLen).Interface().([]string) } diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index f058cc781e..cb3efbb9ad 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -25,6 +25,7 @@ type TracerConfig struct { instrumentationVersion string // Schema URL of the telemetry emitted by the Tracer. schemaURL string + attrs attribute.Set } // InstrumentationVersion returns the version of the library providing instrumentation. @@ -32,6 +33,12 @@ func (t *TracerConfig) InstrumentationVersion() string { return t.instrumentationVersion } +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + // SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. func (t *TracerConfig) SchemaURL() string { return t.schemaURL @@ -307,6 +314,16 @@ func WithInstrumentationVersion(version string) TracerOption { }) } +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + // WithSchemaURL sets the schema URL for the Tracer. func WithSchemaURL(schemaURL string) TracerOption { return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index d82fbaf550..0e8e5e0232 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.13.0" + return "1.14.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 22df4553c2..40df1fae41 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,10 +14,11 @@ module-sets: stable-v1: - version: v1.13.0 + version: v1.14.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/example/fib - go.opentelemetry.io/otel/example/jaeger - go.opentelemetry.io/otel/example/namedtracer @@ -34,7 +35,7 @@ module-sets: - go.opentelemetry.io/otel/trace - go.opentelemetry.io/otel/sdk experimental-metrics: - version: v0.36.0 + version: v0.37.0 modules: - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus @@ -49,7 +50,7 @@ module-sets: - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/example/view experimental-schema: - version: v0.0.3 + version: v0.0.4 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go new file mode 100644 index 0000000000..ecc0dabb74 --- /dev/null +++ b/vendor/golang.org/x/exp/maps/maps.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package maps defines various functions useful with maps of any type. +package maps + +// Keys returns the keys of the map m. +// The keys will be in an indeterminate order. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// Values returns the values of the map m. +// The values will be in an indeterminate order. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} + +// Equal reports whether two maps contain the same key/value pairs. +// Values are compared using ==. +func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || v1 != v2 { + return false + } + } + return true +} + +// EqualFunc is like Equal, but compares values using eq. +// Keys are still compared with ==. +func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || !eq(v1, v2) { + return false + } + } + return true +} + +// Clear removes all entries from m, leaving it empty. +func Clear[M ~map[K]V, K comparable, V any](m M) { + for k := range m { + delete(m, k) + } +} + +// Clone returns a copy of m. This is a shallow clone: +// the new keys and values are set using ordinary assignment. +func Clone[M ~map[K]V, K comparable, V any](m M) M { + // Preserve nil in case it matters. + if m == nil { + return nil + } + r := make(M, len(m)) + for k, v := range m { + r[k] = v + } + return r +} + +// Copy copies all key/value pairs in src adding them to dst. +// When a key in src is already present in dst, +// the value in dst will be overwritten by the value associated +// with the key in src. 
+func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { + for k, v := range src { + dst[k] = v + } +} + +// DeleteFunc deletes any key/value pairs from m for which del returns true. +func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { + for k, v := range m { + if del(k, v) { + delete(m, k) + } + } +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 8a237c5d61..cff0cd49ec 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -128,6 +128,12 @@ func Contains[E comparable](s []E, v E) bool { return Index(s, v) >= 0 } +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[E any](s []E, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + // Insert inserts the values v... into s at index i, // returning the modified slice. // In the returned slice r, r[i] == v[0]. @@ -151,12 +157,35 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S { // Delete removes the elements s[i:j] from s, returning the modified slice. // Delete panics if s[i:j] is not a valid slice of s. // Delete modifies the contents of the slice s; it does not create a new slice. -// Delete is O(len(s)-(j-i)), so if many items must be deleted, it is better to +// Delete is O(len(s)-j), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + return append(s[:i], s[j:]...) } +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[j:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 +} + // Clone returns a copy of the slice. // The elements are copied using assignment, so this is a shallow clone. func Clone[S ~[]E, E any](s S) S { @@ -170,8 +199,11 @@ func Clone[S ~[]E, E any](s S) S { // Compact replaces consecutive runs of equal elements with a single copy. // This is like the uniq command found on Unix. // Compact modifies the contents of the slice s; it does not create a new slice. +// When Compact discards m elements in total, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage collected. func Compact[S ~[]E, E comparable](s S) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 @@ -188,7 +220,7 @@ func Compact[S ~[]E, E comparable](s S) S { // CompactFunc is like Compact but uses a comparison function. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 @@ -205,11 +237,19 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { // Grow increases the slice's capacity, if necessary, to guarantee space for // another n elements. 
After Grow(n), at least n elements can be appended -// to the slice without another allocation. Grow may modify elements of the -// slice between the length and the capacity. If n is negative or too large to +// to the slice without another allocation. If n is negative or too large to // allocate the memory, Grow panics. func Grow[S ~[]E, E any](s S, n int) S { - return append(s, make(S, n)...)[:len(s)] + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s } // Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index c22e74bd10..f14f40da71 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -30,7 +30,7 @@ func SortFunc[E any](x []E, less func(a, b E) bool) { pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) } -// SortStable sorts the slice x while keeping the original order of equal +// SortStableFunc sorts the slice x while keeping the original order of equal // elements, using less to compare elements. func SortStableFunc[E any](x []E, less func(a, b E) bool) { stableLessFunc(x, len(x), less) @@ -62,15 +62,22 @@ func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { - // search returns the leftmost position where f returns true, or len(x) if f - // returns false for all x. This is the insertion position for target in x, - // and could point to an element that's either == target or not. - pos := search(len(x), func(i int) bool { return x[i] >= target }) - if pos >= len(x) || x[pos] != target { - return pos, false - } else { - return pos, true + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if x[h] < target { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && x[i] == target } // BinarySearchFunc works like BinarySearch, but uses a custom comparison @@ -78,30 +85,22 @@ func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { // defined by cmp. cmp(a, b) is expected to return an integer comparing the two // parameters: 0 if a == b, a negative number if a < b and a positive number if // a > b. -func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) { - pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 }) - if pos >= len(x) || cmp(x[pos], target) != 0 { - return pos, false - } else { - return pos, true - } -} - -func search(n int, f func(int) bool) int { - // Define f(-1) == false and f(n) == true. - // Invariant: f(i-1) == false, f(j) == true. 
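As an illustration of the golang.org/x/exp/slices and golang.org/x/exp/maps additions vendored above (ContainsFunc, Replace, Compact, Grow, the two-type BinarySearchFunc, and the new maps helpers), here is a minimal usage sketch; it assumes a module that already pulls in these packages, and all values are arbitrary:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/maps"
	"golang.org/x/exp/slices"
)

func main() {
	s := []int{3, 1, 2, 2, 5}

	// ContainsFunc reports whether at least one element satisfies the predicate.
	fmt.Println(slices.ContainsFunc(s, func(v int) bool { return v > 4 })) // true

	// Replace substitutes s[1:3] ([1 2]) with the given values.
	s = slices.Replace(s, 1, 3, 9, 9, 9) // [3 9 9 9 2 5]

	// Compact collapses consecutive runs of equal elements, like uniq.
	slices.Sort(s)        // [2 3 5 9 9 9]
	s = slices.Compact(s) // [2 3 5 9]

	// Grow guarantees capacity for at least 8 further appends without reallocation.
	s = slices.Grow(s, 8)
	fmt.Println(len(s), cap(s)-len(s) >= 8) // 4 true

	// BinarySearchFunc now lets the target type differ from the element type.
	type user struct {
		ID   int
		Name string
	}
	users := []user{{1, "ann"}, {3, "bob"}, {7, "cal"}} // sorted by ID
	i, found := slices.BinarySearchFunc(users, 3, func(u user, id int) int { return u.ID - id })
	fmt.Println(i, found) // 1 true

	// The new x/exp/maps helpers: Keys returns keys in indeterminate order,
	// DeleteFunc removes entries matched by the predicate.
	m := map[string]int{"a": 1, "b": 2, "c": 3}
	keys := maps.Keys(m)
	slices.Sort(keys)
	fmt.Println(keys) // [a b c]
	maps.DeleteFunc(m, func(k string, v int) bool { return v%2 == 0 })
	fmt.Println(len(m)) // 2
}
```

The BinarySearchFunc signature change is the notable API shift here: the comparison now takes the element and the target as distinct type parameters, so existing callers with identical types still compile, while lookups by key no longer require constructing a full element.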
+func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. i, j := 0, n for i < j { h := int(uint(i+j) >> 1) // avoid overflow when computing h // i ≤ h < j - if !f(h) { - i = h + 1 // preserves f(i-1) == false + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 } else { - j = h // preserves f(j) == true + j = h // preserves cmp(x[j], target) >= 0 } } - // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. - return i + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 } type sortedHint int // hint for pdqsort when choosing the pivot diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 1473e1296d..781770c204 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) * [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) -## Policy for new packages +## Policy for new endpoints We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a @@ -29,8 +29,12 @@ package. ## Report Issues / Send Patches -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. In particular: + +* Excluding trivial changes, all contributions should be connected to an existing issue. +* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. +* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2). diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index db6b19e93d..b3e8783cc5 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -13,12 +13,15 @@ import ( "os" "path/filepath" "runtime" + "time" "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/authhandler" ) +const adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials @@ -66,6 +69,14 @@ type CredentialsParams struct { // The OAuth2 TokenURL default override. This value overrides the default TokenURL, // unless explicitly specified by the credentials config file. Optional. TokenURL string + + // EarlyTokenRefresh is the amount of time before a token expires that a new + // token will be preemptively fetched. If unset the default value is 10 + // seconds. + // + // Note: This option is currently only respected when using credentials + // fetched from the GCE metadata server. 
+ EarlyTokenRefresh time.Duration } func (params CredentialsParams) deepCopy() CredentialsParams { @@ -153,13 +164,12 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar id, _ := metadata.ProjectID() return &Credentials{ ProjectID: id, - TokenSource: ComputeTokenSource("", params.Scopes...), + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), }, nil } // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information", adcSetupURL) } // FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index 8a3349fc2c..ca717634a3 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -26,7 +26,7 @@ // // Using workload identity federation, your application can access Google Cloud // resources from Amazon Web Services (AWS), Microsoft Azure or any identity -// provider that supports OpenID Connect (OIDC). +// provider that supports OpenID Connect (OIDC) or SAML 2.0. // Traditionally, applications running outside Google Cloud have used service // account keys to access Google Cloud resources. Using identity federation, // you can allow your workload to impersonate a service account. @@ -36,26 +36,70 @@ // Follow the detailed instructions on how to configure Workload Identity Federation // in various platforms: // -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws -// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure -// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc +// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws +// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure +// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml // // For OIDC and SAML providers, the library can retrieve tokens in three ways: // from a local file location (file-sourced credentials), from a server // (URL-sourced credentials), or from a local executable (executable-sourced // credentials). // For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC token prior to expiration. +// refreshing the file location with a new OIDC/SAML token prior to expiration. // For tokens with one hour lifetimes, the token needs to be updated in the file // every hour. The token can be stored directly as plain text or in JSON format. // For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC token. The response can be in plain text or JSON. +// return the OIDC/SAML token. The response can be in plain text or JSON. // Additional required request headers can also be specified. // For executable-sourced credentials, an application needs to be available to -// output the OIDC token and other information in a JSON format. 
+// output the OIDC/SAML token and other information in a JSON format. // For more information on how these work (and how to implement // executable-sourced credentials), please check out: -// https://cloud.google.com/iam/docs/using-workload-identity-federation#oidc +// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration +// +// Note that this library does not perform any validation on the token_url, token_info_url, +// or service_account_impersonation_url fields of the credential configuration. +// It is not recommended to use a credential configuration that you did not generate with +// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +// +// # Workforce Identity Federation +// +// Workforce identity federation lets you use an external identity provider (IdP) to +// authenticate and authorize a workforce—a group of users, such as employees, partners, +// and contractors—using IAM, so that the users can access Google Cloud services. +// Workforce identity federation extends Google Cloud's identity capabilities to support +// syncless, attribute-based single sign on. +// +// With workforce identity federation, your workforce can access Google Cloud resources +// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or +// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation +// Services (AD FS), Okta, and others. +// +// Follow the detailed instructions on how to configure Workload Identity Federation +// in various platforms: +// +// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad +// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta +// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml +// +// For workforce identity federation, the library can retrieve tokens in three ways: +// from a local file location (file-sourced credentials), from a server +// (URL-sourced credentials), or from a local executable (executable-sourced +// credentials). +// For file-sourced credentials, a background process needs to be continuously +// refreshing the file location with a new OIDC/SAML token prior to expiration. +// For tokens with one hour lifetimes, the token needs to be updated in the file +// every hour. The token can be stored directly as plain text or in JSON format. +// For URL-sourced credentials, a local server needs to host a GET endpoint to +// return the OIDC/SAML token. The response can be in plain text or JSON. +// Additional required request headers can also be specified. +// For executable-sourced credentials, an application needs to be available to +// output the OIDC/SAML token and other information in a JSON format. +// For more information on how these work (and how to implement +// executable-sourced credentials), please check out: +// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in // // Note that this library does not perform any validation on the token_url, token_info_url, // or service_account_impersonation_url fields of the credential configuration. 
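The EarlyTokenRefresh knob and the updated ADC setup URL land in golang.org/x/oauth2/google via the hunks above. A hedged sketch of how a caller might opt into a larger refresh window; the scope and the 30-second value are illustrative, and per the field comment the option currently only affects credentials fetched from the GCE metadata server:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// Refresh metadata-server tokens 30s before expiry instead of the 10s default.
	creds, err := google.FindDefaultCredentialsWithParams(ctx, google.CredentialsParams{
		Scopes:            []string{"https://www.googleapis.com/auth/cloud-platform"},
		EarlyTokenRefresh: 30 * time.Second,
	})
	if err != nil {
		// With no ADC configured, the error now points at the new setup URL
		// (https://cloud.google.com/docs/authentication/external/set-up-adc).
		fmt.Println(err)
		return
	}

	tok, err := creds.TokenSource.Token()
	fmt.Println(creds.ProjectID, tok != nil, err)
}
```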
@@ -86,5 +130,4 @@ // same as the one obtained from the oauth2.Config returned from ConfigFromJSON or // JWTConfigFromJSON, but the Credentials may contain additional information // that is useful is some circumstances. -// package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index a1b629a2eb..cc1223889e 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -231,7 +231,11 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) + return computeTokenSource(account, 0, scope...) +} + +func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSourceWithExpiry(nil, computeSource{account: account, scopes: scope}, earlyExpiry) } type computeSource struct { diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 291df5c833..9085fabe34 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -16,6 +16,7 @@ import ( "net/url" "strings" "sync" + "time" "golang.org/x/oauth2/internal" ) @@ -140,7 +141,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // // State is a token to protect the user from CSRF attacks. You must // always provide a non-empty string and validate that it matches the -// the state query parameter on your redirect callback. +// state query parameter on your redirect callback. // See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well @@ -290,6 +291,8 @@ type reuseTokenSource struct { mu sync.Mutex // guards t t *Token + + expiryDelta time.Duration } // Token returns the current token if it's still valid, else will @@ -305,6 +308,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { if err != nil { return nil, err } + t.expiryDelta = s.expiryDelta s.t = t return t, nil } @@ -379,3 +383,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { new: src, } } + +// ReuseTokenSource returns a TokenSource that acts in the same manner as the +// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// configurable. The expiration time of a token is calculated as +// t.Expiry.Add(-earlyExpiry). +func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly, but set the expiryDelta to earlyExpiry, + // so the behavior matches what the user expects. 
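ReuseTokenSourceWithExpiry, added in the hunk above, generalizes ReuseTokenSource with a configurable expiry buffer. A minimal sketch, with a hypothetical fakeSource standing in for a real refreshing TokenSource such as one returned by oauth2.Config.TokenSource:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

// fakeSource is a hypothetical stand-in for a real refreshing TokenSource.
type fakeSource struct{ n int }

func (s *fakeSource) Token() (*oauth2.Token, error) {
	s.n++
	return &oauth2.Token{
		AccessToken: fmt.Sprintf("tok-%d", s.n),
		Expiry:      time.Now().Add(time.Hour),
	}, nil
}

func main() {
	src := &fakeSource{}

	// Reuse cached tokens until 5 minutes before expiry (instead of the 10s default).
	ts := oauth2.ReuseTokenSourceWithExpiry(nil, src, 5*time.Minute)

	t1, _ := ts.Token()
	t2, _ := ts.Token() // cached: same token, no second fetch
	fmt.Println(t1.AccessToken, t2.AccessToken, src.n) // tok-1 tok-1 1
}
```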
+ rt.expiryDelta = earlyExpiry + return rt + } + src = rt.new + } + if t != nil { + t.expiryDelta = earlyExpiry + } + return &reuseTokenSource{ + t: t, + new: src, + expiryDelta: earlyExpiry, + } +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 822720341a..7c64006de6 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -16,10 +16,10 @@ import ( "golang.org/x/oauth2/internal" ) -// expiryDelta determines how earlier a token should be considered +// defaultExpiryDelta determines how earlier a token should be considered // expired than its actual expiration time. It is used to avoid late // expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second +const defaultExpiryDelta = 10 * time.Second // Token represents the credentials used to authorize // the requests to access protected resources on the OAuth 2.0 @@ -52,6 +52,11 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. raw interface{} + + // expiryDelta is used to calculate when a token is considered + // expired, by subtracting from Expiry. If zero, defaultExpiryDelta + // is used. + expiryDelta time.Duration } // Type returns t.TokenType if non-empty, else "Bearer". @@ -127,6 +132,11 @@ func (t *Token) expired() bool { if t.Expiry.IsZero() { return false } + + expiryDelta := defaultExpiryDelta + if t.expiryDelta != 0 { + expiryDelta = t.expiryDelta + } return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 0000000000..be8f5a867e --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,762 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// +// type A struct{ X int } +// type B A +// +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. +package objectpath + +import ( + "fmt" + "go/types" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/internal/typeparams" + + _ "unsafe" // for go:linkname +) + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. 
+// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' because dot cannot appear in an identifier. +// - The TT operators are encoded as [EKPRUTC]; +// one of these (TypeParam) requires an integer operand, +// which is encoded as a string of decimal digits. +// - The TO operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. +// These indices are stable across different representations +// of the same package, even source and export data. +// The indices used are implementation specific and may not correspond to +// the argument to the go/types function. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +const ( + // object->type operators + opType = '.' // .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) +) + +// For returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. 
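The For/Object pair documented above round-trips an object through its path encoding. A small sketch against a hypothetical package p, type-checked in memory, showing that the decoded object is the identical *types.Var:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package p

type T struct{ X int }

func (T) M() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	// Look up the struct field X through the type checker, encode its path,
	// then decode the path back to an object in the same package.
	obj := pkg.Scope().Lookup("T").Type().Underlying().(*types.Struct).Field(0)
	path, err := objectpath.For(obj)
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // e.g. "T.UF0": Lookup("T").Type().Underlying().Field(0)

	back, err := objectpath.Object(pkg, path)
	fmt.Println(back == obj, err) // true <nil>
}
```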
+func For(obj types.Object) (Path, error) { + return newEncoderFor()(obj) +} + +// An encoder amortizes the cost of encoding the paths of multiple objects. +// Nonexported pending approval of proposal 58668. +type encoder struct { + scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() + namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() +} + +// Exposed to gopls via golang.org/x/tools/internal/typesinternal +// pending approval of proposal 58668. +// +//go:linkname newEncoderFor +func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For } + +func (enc *encoder) For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. + // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.TypeName: + if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + // With the exception of type parameters, only package-level type names + // have a path. + return "", fmt.Errorf("no path for %v", obj) + } + case *types.Const, // Only package-level constants have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. + if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + + if path, ok := enc.concreteMethod(obj); ok { + // Fast path for concrete methods that avoids looping over scope. + return path, nil + } + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. 
+ empty := make([]byte, 0, 48) // initial space + names := enc.scopeNames(scope) + for _, name := range names { + o := scope.Lookup(name) + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, name...) + path = append(path, opType) + + T := o.Type() + + if tname.IsAlias() { + // type alias + if r := find(obj, T, path, nil); r != nil { + return Path(r), nil + } + } else { + if named, _ := T.(*types.Named); named != nil { + if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + // generic named type + return Path(r), nil + } + } + // defined (named) type + if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, name := range names { + o := scope.Lookup(name) + path := append(empty, name...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := o.Type().(*types.Named); ok { + path = append(path, opType) + // Note that method index here is always with respect + // to canonical ordering of methods, regardless of how + // they appear in the underlying type. + for i, m := range enc.namedMethods(T) { + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// concreteMethod returns the path for meth, which must have a non-nil receiver. +// The second return value indicates success and may be false if the method is +// an interface method or if it is an instantiated method. +// +// This function is just an optimization that avoids the general scope walking +// approach. You are expected to fall back to the general approach if this +// function fails. +func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) { + // Concrete methods can only be declared on package-scoped named types. For + // that reason we can skip the expensive walk over the package scope: the + // path will always be package -> named type -> method. We can trivially get + // the type name from the receiver, and only have to look over the type's + // methods to find the method index. + // + // Methods on generic types require special consideration, however. Consider + // the following package: + // + // L1: type S[T any] struct{} + // L2: func (recv S[A]) Foo() { recv.Bar() } + // L3: func (recv S[B]) Bar() { } + // L4: type Alias = S[int] + // L5: func _[T any]() { var s S[int]; s.Foo() } + // + // The receivers of methods on generic types are instantiations. L2 and L3 + // instantiate S with the type-parameters A and B, which are scoped to the + // respective methods. L4 and L5 each instantiate S with int. Each of these + // instantiations has its own method set, full of methods (and thus objects) + // with receivers whose types are the respective instantiations. 
In other + // words, we have + // + // S[A].Foo, S[A].Bar + // S[B].Foo, S[B].Bar + // S[int].Foo, S[int].Bar + // + // We may thus be trying to produce object paths for any of these objects. + // + // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo + // and S.Bar, which are the paths that this function naturally produces. + // + // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that + // don't correspond to the origin methods. For S[int], this is significant. + // The most precise object path for S[int].Foo, for example, is Alias.Foo, + // not S.Foo. Our function, however, would produce S.Foo, which would + // resolve to a different object. + // + // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are + // still the correct paths, since only the origin methods have meaningful + // paths. But this is likely only true for trivial cases and has edge cases. + // Since this function is only an optimization, we err on the side of giving + // up, deferring to the slower but definitely correct algorithm. Most users + // of objectpath will only be giving us origin methods, anyway, as referring + // to instantiated methods is usually not useful. + + if typeparams.OriginMethod(meth) != meth { + return "", false + } + + recvT := meth.Type().(*types.Signature).Recv().Type() + if ptr, ok := recvT.(*types.Pointer); ok { + recvT = ptr.Elem() + } + + named, ok := recvT.(*types.Named) + if !ok { + return "", false + } + + if types.IsInterface(named) { + // Named interfaces don't have to be package-scoped + // + // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface + // methods, too, I think. + return "", false + } + + // Preallocate space for the name, opType, opMethod, and some digits. + name := named.Obj().Name() + path := make([]byte, 0, len(name)+8) + path = append(path, name...) + path = append(path, opType) + for i, m := range enc.namedMethods(named) { + if m == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + + panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named)) +} + +// find finds obj within type T, returning the path to it, or nil if not found. +// +// The seen map is used to short circuit cycles through type parameters. If +// nil, it will be allocated as necessary. +func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { + switch T := T.(type) { + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. 
+ return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Signature: + if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + return r + } + if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults), seen) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + fld := T.Field(i) + path2 := appendOpArg(path, opField, i) + if fld == obj { + return path2 // found field var + } + if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // found param/result var + } + if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *typeparams.TypeParam: + name := T.Obj() + if name == obj { + return append(path, opObj) + } + if seen[name] { + return nil + } + if seen == nil { + seen = make(map[*types.TypeName]bool) + } + seen[name] = true + if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + return r + } + return nil + } + panic(T) +} + +func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + path2 := appendOpArg(path, opTypeParam, i) + if r := find(obj, tparam, path2, seen); r != nil { + return r + } + } + return nil +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + if p == "" { + return nil, fmt.Errorf("empty path") + } + + pathstr := string(p) + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." + } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Named,Signature} + type hasTypeParams interface { + TypeParams() *typeparams.TypeParamList + } + // abstraction of *types.{Named,TypeParam} + type hasObj interface { + Obj() *types.TypeName + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' (the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. 
+ var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFM] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod, opTypeParam: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. + if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) + } + t = named.Underlying() + + case opTypeParam: + hasTypeParams, ok := t.(hasTypeParams) // Named, Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) + } + tparams := hasTypeParams.TypeParams() + if n := tparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = tparams.At(index) + + case opConstraint: + tparam, ok := t.(*typeparams.TypeParam) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) + } + t = tparam.Constraint() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + case opField: + structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + methods := namedMethods(t) // (unmemoized) + if index >= len(methods) { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods)) + } + obj = methods[index] // Id-ordered + + 
default: + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) + } + t = nil + + case opObj: + hasObj, ok := t.(hasObj) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) + } + obj = hasObj.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} + +// namedMethods returns the methods of a Named type in ascending Id order. +func namedMethods(named *types.Named) []*types.Func { + methods := make([]*types.Func, named.NumMethods()) + for i := range methods { + methods[i] = named.Method(i) + } + sort.Slice(methods, func(i, j int) bool { + return methods[i].Id() < methods[j].Id() + }) + return methods +} + +// scopeNames is a memoization of scope.Names. Callers must not modify the result. +func (enc *encoder) scopeNames(scope *types.Scope) []string { + m := enc.scopeNamesMemo + if m == nil { + m = make(map[*types.Scope][]string) + enc.scopeNamesMemo = m + } + names, ok := m[scope] + if !ok { + names = scope.Names() // allocates and sorts + m[scope] = names + } + return names +} + +// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. +func (enc *encoder) namedMethods(named *types.Named) []*types.Func { + m := enc.namedMethodsMemo + if m == nil { + m = make(map[*types.Named][]*types.Func) + enc.namedMethodsMemo = m + } + methods, ok := m[named] + if !ok { + methods = namedMethods(named) // allocates and sorts + m[named] = methods + } + return methods + +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 0372fb3a64..a973dece93 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -7,6 +7,18 @@ // Package gcimporter provides various functions for reading // gc-generated object files that can be used to implement the // Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. 
package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index b285a11ce2..34fc783f82 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -12,6 +12,7 @@ package gcimporter import ( "go/token" "go/types" + "sort" "strings" "golang.org/x/tools/internal/pkgbits" @@ -121,6 +122,16 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st iface.Complete() } + // Imports() of pkg are all of the transitive packages that were loaded. + var imps []*types.Package + for _, imp := range pr.pkgs { + if imp != nil && imp != pkg { + imps = append(imps, imp) + } + } + sort.Sort(byPath(imps)) + pkg.SetImports(imps) + pkg.MarkComplete() return pkg } @@ -260,39 +271,9 @@ func (r *reader) doPkg() *types.Package { pkg := types.NewPackage(path, name) r.p.imports[path] = pkg - imports := make([]*types.Package, r.Len()) - for i := range imports { - imports[i] = r.pkg() - } - pkg.SetImports(flattenImports(imports)) - return pkg } -// flattenImports returns the transitive closure of all imported -// packages rooted from pkgs. -func flattenImports(pkgs []*types.Package) []*types.Package { - var res []*types.Package - seen := make(map[*types.Package]struct{}) - for _, pkg := range pkgs { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - - // pkg.Imports() is already flattened. - for _, pkg := range pkg.Imports() { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - } - } - return res -} - // @@@ Types func (r *reader) typ() types.Type { diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 25a1426d30..cfba8189f1 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -87,7 +87,6 @@ func IsTypeParam(t types.Type) bool { func OriginMethod(fn *types.Func) *types.Func { recv := fn.Type().(*types.Signature).Recv() if recv == nil { - return fn } base := recv.Type() diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index ce7d4351b2..3c53fbc63b 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,6 +11,8 @@ import ( "go/types" "reflect" "unsafe" + + "golang.org/x/tools/go/types/objectpath" ) func SetUsesCgo(conf *types.Config) bool { @@ -50,3 +52,10 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } var SetGoVersion = func(conf *types.Config, version string) bool { return false } + +// NewObjectpathEncoder returns a function closure equivalent to +// objectpath.For but amortized for multiple (sequential) calls. +// It is a temporary workaround, pending the approval of proposal 58668. 
+// +//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor +func NewObjectpathFunc() func(types.Object) (objectpath.Path, error) diff --git a/vendor/google.golang.org/api/idtoken/idtoken.go b/vendor/google.golang.org/api/idtoken/idtoken.go index b7a82e92bf..bd29a95a3c 100644 --- a/vendor/google.golang.org/api/idtoken/idtoken.go +++ b/vendor/google.golang.org/api/idtoken/idtoken.go @@ -34,6 +34,7 @@ const ( unknownCredType credentialsType = iota serviceAccount impersonatedServiceAccount + externalAccount ) // NewClient creates a HTTP Client that automatically adds an ID token to each @@ -139,7 +140,7 @@ func tokenSourceFromBytes(ctx context.Context, data []byte, audience string, ds return nil, err } return oauth2.ReuseTokenSource(tok, ts), nil - case impersonatedServiceAccount: + case impersonatedServiceAccount, externalAccount: type url struct { ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` } @@ -155,7 +156,7 @@ func tokenSourceFromBytes(ctx context.Context, data []byte, audience string, ds TargetPrincipal: account, IncludeEmail: true, } - ts, err := impersonate.IDTokenSource(ctx, config) + ts, err := impersonate.IDTokenSource(ctx, config, option.WithCredentialsJSON(data)) if err != nil { return nil, err } @@ -188,6 +189,8 @@ func parseCredType(typeString string) credentialsType { return serviceAccount case "impersonated_service_account": return impersonatedServiceAccount + case "external_account": + return externalAccount default: return unknownCredType } diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/internal/cert/default_cert.go similarity index 100% rename from vendor/google.golang.org/api/transport/cert/default_cert.go rename to vendor/google.golang.org/api/internal/cert/default_cert.go diff --git a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go b/vendor/google.golang.org/api/internal/cert/enterprise_cert.go similarity index 100% rename from vendor/google.golang.org/api/transport/cert/enterprise_cert.go rename to vendor/google.golang.org/api/internal/cert/enterprise_cert.go diff --git a/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go similarity index 100% rename from vendor/google.golang.org/api/transport/cert/secureconnect_cert.go rename to vendor/google.golang.org/api/internal/cert/secureconnect_cert.go diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 32d52413b3..63c6609220 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -6,10 +6,15 @@ package internal import ( "context" + "crypto/tls" "encoding/json" "errors" "fmt" "io/ioutil" + "net" + "net/http" + "os" + "time" "golang.org/x/oauth2" "google.golang.org/api/internal/impersonate" @@ -17,6 +22,8 @@ import ( "golang.org/x/oauth2/google" ) +const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" + // Creds returns credential information obtained from DialSettings, or if none, then // it returns default credential information. 
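The idtoken change above adds external_account (workload identity federation) configurations to the credential types that can back an ID token source. A hedged sketch; wif-config.json and the audience are placeholders for a gcloud-generated credential configuration and the target service:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/api/idtoken"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// "wif-config.json" stands in for an external_account credential
	// configuration file; with this release it is accepted alongside
	// service_account and impersonated_service_account credentials.
	ts, err := idtoken.NewTokenSource(ctx, "https://my-audience.example.com",
		option.WithCredentialsFile("wif-config.json"))
	if err != nil {
		fmt.Println(err)
		return
	}

	tok, err := ts.Token()
	fmt.Println(tok != nil, err)
}
```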
func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { @@ -80,8 +87,25 @@ const ( // - Otherwise, executes standard OAuth 2.0 flow // More details: google.aip.dev/auth/4111 func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { + var params google.CredentialsParams + params.Scopes = ds.GetScopes() + + // Determine configurations for the OAuth2 transport, which is separate from the API transport. + // The OAuth2 transport and endpoint will be configured for mTLS if applicable. + clientCertSource, oauth2Endpoint, err := GetClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) + if err != nil { + return nil, err + } + params.TokenURL = oauth2Endpoint + if clientCertSource != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } + ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig)) + } + // By default, a standard OAuth 2.0 token source is created - cred, err := google.CredentialsFromJSON(ctx, data, ds.GetScopes()...) + cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) if err != nil { return nil, err } @@ -131,14 +155,22 @@ func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource } } -// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. -// -// NOTE(cbro): consider promoting this to a field on google.Credentials. -func QuotaProjectFromCreds(cred *google.Credentials) string { +// GetQuotaProject retrieves quota project with precedence being: client option, +// environment variable, creds file. +func GetQuotaProject(creds *google.Credentials, clientOpt string) string { + if clientOpt != "" { + return clientOpt + } + if env := os.Getenv(quotaProjectEnvVar); env != "" { + return env + } + if creds == nil { + return "" + } var v struct { QuotaProject string `json:"quota_project_id"` } - if err := json.Unmarshal(cred.JSON, &v); err != nil { + if err := json.Unmarshal(creds.JSON, &v); err != nil { return "" } return v.QuotaProject @@ -157,3 +189,35 @@ func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds * ProjectID: creds.ProjectID, }, nil } + +// oauth2DialSettings returns the settings to be used by the OAuth2 transport, which is separate from the API transport. +func oauth2DialSettings(ds *DialSettings) *DialSettings { + var ods DialSettings + ods.DefaultEndpoint = google.Endpoint.TokenURL + ods.DefaultMTLSEndpoint = google.MTLSTokenURL + ods.ClientCertSource = ds.ClientCertSource + return &ods +} + +// customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS. 
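GetQuotaProject, introduced above, resolves the quota project with the precedence client option, then the new GOOGLE_CLOUD_QUOTA_PROJECT environment variable, then the quota_project_id field of the credentials file. Seen from the public API, that ordering looks roughly like the sketch below; the project names are placeholders and client construction still fails without valid credentials:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"google.golang.org/api/option"
	htransport "google.golang.org/api/transport/http"
)

func main() {
	ctx := context.Background()

	// Middle precedence: the new environment variable.
	os.Setenv("GOOGLE_CLOUD_QUOTA_PROJECT", "env-project")

	// Highest precedence: an explicit client option; lowest is the
	// quota_project_id embedded in the credentials JSON itself.
	client, endpoint, err := htransport.NewClient(ctx,
		option.WithQuotaProject("option-project"),
		option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
	)
	fmt.Println(client != nil, endpoint, err)
}
```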
+func customHTTPClient(tlsConfig *tls.Config) *http.Client { + trans := baseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +func baseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/google.golang.org/api/transport/internal/dca/dca.go b/vendor/google.golang.org/api/internal/dca.go similarity index 92% rename from vendor/google.golang.org/api/transport/internal/dca/dca.go rename to vendor/google.golang.org/api/internal/dca.go index 78004f0475..204a3fd2f3 100644 --- a/vendor/google.golang.org/api/transport/internal/dca/dca.go +++ b/vendor/google.golang.org/api/internal/dca.go @@ -23,15 +23,16 @@ // // This package is not intended for use by end developers. Use the // google.golang.org/api/option package to configure API clients. -package dca + +// Package internal supports the options and transport packages. +package internal import ( "net/url" "os" "strings" - "google.golang.org/api/internal" - "google.golang.org/api/transport/cert" + "google.golang.org/api/internal/cert" ) const ( @@ -43,7 +44,7 @@ const ( // GetClientCertificateSourceAndEndpoint is a convenience function that invokes // getClientCertificateSource and getEndpoint sequentially and returns the client // cert source and endpoint as a tuple. -func GetClientCertificateSourceAndEndpoint(settings *internal.DialSettings) (cert.Source, string, error) { +func GetClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { clientCertSource, err := getClientCertificateSource(settings) if err != nil { return nil, "", err @@ -65,7 +66,7 @@ func GetClientCertificateSourceAndEndpoint(settings *internal.DialSettings) (cer // Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE // must be set to "true" to allow certificate to be used (including user provided // certificates). For details, see AIP-4114. -func getClientCertificateSource(settings *internal.DialSettings) (cert.Source, error) { +func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { if !isClientCertificateEnabled() { return nil, nil } else if settings.ClientCertSource != nil { @@ -94,7 +95,7 @@ func isClientCertificateEnabled() bool { // URL (ex. https://...), then the user-provided address will be merged into // the default endpoint. For example, WithEndpoint("myhost:8000") and // WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" -func getEndpoint(settings *internal.DialSettings, clientCertSource cert.Source) (string, error) { +func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { if settings.Endpoint == "" { mtlsMode := getMTLSMode() if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index f135505662..19df50de13 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "0.110.0" +const Version = "0.116.0" diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 47568a4061..403509d08f 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -20,10 +20,9 @@ import ( "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" "google.golang.org/api/internal" + "google.golang.org/api/internal/cert" "google.golang.org/api/option" - "google.golang.org/api/transport/cert" "google.golang.org/api/transport/http/internal/propagation" - "google.golang.org/api/transport/internal/dca" ) // NewClient returns an HTTP client for use communicating with a Google cloud @@ -34,7 +33,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } - clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(settings) + clientCertSource, endpoint, err := internal.GetClientCertificateSourceAndEndpoint(settings) if err != nil { return nil, "", err } @@ -66,7 +65,6 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna paramTransport := ¶meterTransport{ base: base, userAgent: settings.UserAgent, - quotaProject: settings.QuotaProject, requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport @@ -75,6 +73,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna case settings.NoAuth: // Do nothing. case settings.APIKey != "": + paramTransport.quotaProject = internal.GetQuotaProject(nil, settings.QuotaProject) trans = &transport.APIKey{ Transport: trans, Key: settings.APIKey, @@ -84,10 +83,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } - if paramTransport.quotaProject == "" { - paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds) - } - + paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) ts := creds.TokenSource if settings.ImpersonationConfig == nil && settings.TokenSource != nil { ts = settings.TokenSource diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index af72196c80..3543268f84 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/httpbody.proto package httpbody diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go index 2428963c0e..005513f2a7 100644 --- a/vendor/k8s.io/klog/v2/contextual.go +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -70,11 +70,14 @@ func SetLogger(logger logr.Logger) { // routing log entries through klogr into klog and then into the actual Logger // backend. 
func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { - logging.logger = &logger logging.loggerOptions = loggerOptions{} for _, opt := range opts { opt(&logging.loggerOptions) } + logging.logger = &logWriter{ + Logger: logger, + writeKlogBuffer: logging.loggerOptions.writeKlogBuffer, + } } // ContextualLogger determines whether the logger passed to @@ -93,6 +96,22 @@ func FlushLogger(flush func()) LoggerOption { } } +// WriteKlogBuffer sets a callback that will be invoked by klog to write output +// produced by non-structured log calls like Infof. +// +// The buffer will contain exactly the same data that klog normally would write +// into its own output stream(s). In particular this includes the header, if +// klog is configured to write one. The callback then can divert that data into +// its own output streams. The buffer may or may not end in a line break. +// +// Without such a callback, klog will call the logger's Info or Error method +// with just the message string (i.e. no header). +func WriteKlogBuffer(write func([]byte)) LoggerOption { + return func(o *loggerOptions) { + o.writeKlogBuffer = write + } +} + // LoggerOption implements the functional parameter paradigm for // SetLoggerWithOptions. type LoggerOption func(o *loggerOptions) @@ -100,6 +119,13 @@ type LoggerOption func(o *loggerOptions) type loggerOptions struct { contextualLogger bool flush func() + writeKlogBuffer func([]byte) +} + +// logWriter combines a logger (always set) with a write callback (optional). +type logWriter struct { + Logger + writeKlogBuffer func([]byte) } // ClearLogger removes a backing Logger implementation if one was set earlier @@ -152,7 +178,7 @@ func Background() Logger { if logging.loggerOptions.contextualLogger { // Is non-nil because logging.loggerOptions.contextualLogger is // only true if a logger was set. - return *logging.logger + return logging.logger.Logger } return klogLogger diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go index d53b49da39..f325ded5e9 100644 --- a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -55,6 +55,17 @@ func GetBuffer() *Buffer { // PutBuffer returns a buffer to the free list. func PutBuffer(b *Buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death, without relying on + // sync.Pool behavior. The documentation implies that items may + // get deallocated while stored there ("If the Pool holds the + // only reference when this [= be removed automatically] + // happens, the item might be deallocated."), but + // https://github.com/golang/go/issues/23199 leans more towards + // having such a size limit. + return + } + buffers.Put(b) } @@ -99,7 +110,8 @@ func (buf *Buffer) someDigits(i, d int) int { return copy(buf.Tmp[i:], buf.Tmp[j:]) } -// FormatHeader formats a log header using the provided file name and line number. +// FormatHeader formats a log header using the provided file name and line number +// and writes it into the buffer. func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) { if line < 0 { line = 0 // not a real line number, but acceptable to someDigits @@ -135,3 +147,30 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now buf.Tmp[n+2] = ' ' buf.Write(buf.Tmp[:n+3]) } + +// SprintHeader formats a log header and returns a string. This is a simpler +// version of FormatHeader for use in ktesting. 
+func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string { + if s > severity.FatalLog { + s = severity.InfoLog // for safety. + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.Tmp[0] = severity.Char[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.Tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.Tmp[8] = ':' + buf.twoDigits(9, minute) + buf.Tmp[11] = ':' + buf.twoDigits(12, second) + buf.Tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.Tmp[21] = ']' + return string(buf.Tmp[:22]) +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index f9558c3d28..1dc81a15fa 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -95,9 +95,15 @@ func MergeKVs(first, second []interface{}) []interface{} { return merged } +type Formatter struct { + AnyToStringHook AnyToStringFunc +} + +type AnyToStringFunc func(v interface{}) string + // MergeKVsInto is a variant of MergeKVs which directly formats the key/value // pairs into a buffer. -func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { +func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { if len(first) == 0 && len(second) == 0 { // Nothing to do at all. return @@ -107,7 +113,7 @@ func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { // Nothing to be overridden, second slice is well-formed // and can be used directly. for i := 0; i < len(second); i += 2 { - KVFormat(b, second[i], second[i+1]) + f.KVFormat(b, second[i], second[i+1]) } return } @@ -127,24 +133,28 @@ func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { if overrides[key] { continue } - KVFormat(b, key, first[i+1]) + f.KVFormat(b, key, first[i+1]) } // Round down. l := len(second) l = l / 2 * 2 for i := 1; i < l; i += 2 { - KVFormat(b, second[i-1], second[i]) + f.KVFormat(b, second[i-1], second[i]) } if len(second)%2 == 1 { - KVFormat(b, second[len(second)-1], missingValue) + f.KVFormat(b, second[len(second)-1], missingValue) } } +func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { + Formatter{}.MergeAndFormatKVs(b, first, second) +} + const missingValue = "(MISSING)" // KVListFormat serializes all key/value pairs into the provided buffer. // A space gets inserted before the first pair and between each pair. -func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { +func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { for i := 0; i < len(keysAndValues); i += 2 { var v interface{} k := keysAndValues[i] @@ -153,13 +163,17 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { } else { v = missingValue } - KVFormat(b, k, v) + f.KVFormat(b, k, v) } } +func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { + Formatter{}.KVListFormat(b, keysAndValues...) +} + // KVFormat serializes one key/value pair into the provided buffer. // A space gets inserted before the pair. 
-func KVFormat(b *bytes.Buffer, k, v interface{}) { +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { b.WriteByte(' ') // Keys are assumed to be well-formed according to // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments @@ -203,7 +217,7 @@ func KVFormat(b *bytes.Buffer, k, v interface{}) { case string: writeStringValue(b, true, value) default: - writeStringValue(b, false, fmt.Sprintf("%+v", value)) + writeStringValue(b, false, f.AnyToString(value)) } case []byte: // In https://github.com/kubernetes/klog/pull/237 it was decided @@ -220,8 +234,20 @@ func KVFormat(b *bytes.Buffer, k, v interface{}) { b.WriteByte('=') b.WriteString(fmt.Sprintf("%+q", v)) default: - writeStringValue(b, false, fmt.Sprintf("%+v", v)) + writeStringValue(b, false, f.AnyToString(v)) + } +} + +func KVFormat(b *bytes.Buffer, k, v interface{}) { + Formatter{}.KVFormat(b, k, v) +} + +// AnyToString is the historic fallback formatter. +func (f Formatter) AnyToString(v interface{}) string { + if f.AnyToStringHook != nil { + return f.AnyToStringHook(v) } + return fmt.Sprintf("%+v", v) } // StringerToString converts a Stringer to a string, diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index c5d98ad38c..466eeaf265 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -91,8 +91,6 @@ import ( "sync/atomic" "time" - "github.com/go-logr/logr" - "k8s.io/klog/v2/internal/buffer" "k8s.io/klog/v2/internal/clock" "k8s.io/klog/v2/internal/dbg" @@ -453,7 +451,7 @@ type settings struct { // logger is the global Logger chosen by users of klog, nil if // none is available. - logger *Logger + logger *logWriter // loggerOptions contains the options that were supplied for // globalLogger. @@ -525,6 +523,11 @@ func (s settings) deepCopy() settings { } s.vmodule.filter = filter + if s.logger != nil { + logger := *s.logger + s.logger = &logger + } + return s } @@ -668,15 +671,16 @@ func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buf return buf } -func (l *loggingT) println(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) { l.printlnDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logger is set, we clear the generated header as we rely on the backing - // logger implementation to print headers - if logger != nil { + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { buffer.PutBuffer(buf) buf = buffer.GetBuffer() } @@ -687,15 +691,16 @@ func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) print(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) { l.printDepth(s, logger, filter, 1, args...) 
} -func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { buffer.PutBuffer(buf) buf = buffer.GetBuffer() } @@ -709,15 +714,16 @@ func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter L l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity.Severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { +func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilter, format string, args ...interface{}) { l.printfDepth(s, logger, filter, 1, format, args...) } -func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, format string, args ...interface{}) { +func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { buffer.PutBuffer(buf) buf = buffer.GetBuffer() } @@ -734,11 +740,12 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { buffer.PutBuffer(buf) buf = buffer.GetBuffer() } @@ -753,7 +760,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, f } // if loggr is specified, will call loggr.Error, otherwise output with logging module. 
-func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -765,7 +772,7 @@ func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, dept } // if loggr is specified, will call loggr.Info, otherwise output with logging module. -func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -846,7 +853,7 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { var isLocked = true l.mu.Lock() defer func() { @@ -862,13 +869,17 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf } } data := buf.Bytes() - if log != nil { - // TODO: set 'severity' and caller information as structured log info - // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} - if s == severity.ErrorLog { - logging.logger.WithCallDepth(depth+3).Error(nil, string(data)) + if logger != nil { + if logger.writeKlogBuffer != nil { + logger.writeKlogBuffer(data) } else { - log.WithCallDepth(depth + 3).Info(string(data)) + // TODO: set 'severity' and caller information as structured log info + // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} + if s == severity.ErrorLog { + logger.WithCallDepth(depth+3).Error(nil, string(data)) + } else { + logger.WithCallDepth(depth + 3).Info(string(data)) + } } } else if l.toStderr { os.Stderr.Write(data) @@ -1277,7 +1288,7 @@ func (l *loggingT) setV(pc uintptr) Level { // See the documentation of V for more information. type Verbose struct { enabled bool - logr *logr.Logger + logger *logWriter } func newVerbose(level Level, b bool) Verbose { @@ -1285,7 +1296,7 @@ func newVerbose(level Level, b bool) Verbose { return Verbose{b, nil} } v := logging.logger.V(int(level)) - return Verbose{b, &v} + return Verbose{b, &logWriter{Logger: v, writeKlogBuffer: logging.loggerOptions.writeKlogBuffer}} } // V reports whether verbosity at the call site is at least the requested level. @@ -1359,7 +1370,7 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(severity.InfoLog, v.logr, logging.filter, args...) + logging.print(severity.InfoLog, v.logger, logging.filter, args...) } } @@ -1367,7 +1378,7 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoDepth(depth int, args ...interface{}) { if v.enabled { - logging.printDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) 
} } @@ -1375,7 +1386,7 @@ func (v Verbose) InfoDepth(depth int, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(severity.InfoLog, v.logr, logging.filter, args...) + logging.println(severity.InfoLog, v.logger, logging.filter, args...) } } @@ -1383,7 +1394,7 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfolnDepth(depth int, args ...interface{}) { if v.enabled { - logging.printlnDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) + logging.printlnDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) } } @@ -1391,7 +1402,7 @@ func (v Verbose) InfolnDepth(depth int, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(severity.InfoLog, v.logr, logging.filter, format, args...) + logging.printf(severity.InfoLog, v.logger, logging.filter, format, args...) } } @@ -1399,7 +1410,7 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { if v.enabled { - logging.printfDepth(severity.InfoLog, v.logr, logging.filter, depth, format, args...) + logging.printfDepth(severity.InfoLog, v.logger, logging.filter, depth, format, args...) } } @@ -1407,7 +1418,7 @@ func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, logging.filter, 0, msg, keysAndValues...) + logging.infoS(v.logger, logging.filter, 0, msg, keysAndValues...) } } @@ -1421,14 +1432,14 @@ func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, logging.filter, depth, msg, keysAndValues...) + logging.infoS(v.logger, logging.filter, depth, msg, keysAndValues...) } } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, logging.filter, 0, msg, args...) + logging.errorS(err, v.logger, logging.filter, 0, msg, args...) } } @@ -1436,7 +1447,7 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, logging.filter, 0, msg, keysAndValues...) + logging.errorS(err, v.logger, logging.filter, 0, msg, keysAndValues...) } } diff --git a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go new file mode 100644 index 0000000000..16e34853af --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go @@ -0,0 +1,264 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache provides a cache mechanism based on etags to lazily +// build, and/or cache results from expensive operation such that those +// operations are not repeated unnecessarily. The operations can be +// created as a tree, and replaced dynamically as needed. +// +// # Dependencies and types of caches +// +// This package uses a source/transform/sink model of caches to build +// the dependency tree, and can be used as follows: +// - [NewSource]: A source cache that recomputes the content every time. +// - [NewStaticSource]: A source cache that always produces the +// same content, it is only called once. +// - [NewTransformer]: A cache that transforms data from one format to +// another. It's only refreshed when the source changes. +// - [NewMerger]: A cache that aggregates multiple caches into one. +// It's only refreshed when the source changes. +// - [Replaceable]: A cache adapter that can be atomically +// replaced with a new one, and saves the previous results in case an +// error pops-up. +// +// # Atomicity +// +// Most of the operations are not atomic/thread-safe, except for +// [Replaceable.Replace] which can be performed while the objects +// are being read. +// +// # Etags +// +// Etags in this library is a cache version identifier. It doesn't +// necessarily strictly match to the semantics of http `etags`, but are +// somewhat inspired from it and function with the same principles. +// Hashing the content is a good way to guarantee that your function is +// never going to be called spuriously. In Kubernetes world, this could +// be a `resourceVersion`, this can be an actual etag, a hash, a UUID +// (if the cache always changes), or even a made-up string when the +// content of the cache never changes. +package cached + +import ( + "fmt" + "sync/atomic" +) + +// Result is the content returned from a call to a cache. It can either +// be created with [NewResultOK] if the call was a success, or +// [NewResultErr] if the call resulted in an error. +type Result[T any] struct { + Data T + Etag string + Err error +} + +// NewResultOK creates a new [Result] for a successful operation. +func NewResultOK[T any](data T, etag string) Result[T] { + return Result[T]{ + Data: data, + Etag: etag, + } +} + +// NewResultErr creates a new [Result] when an error has happened. +func NewResultErr[T any](err error) Result[T] { + return Result[T]{ + Err: err, + } +} + +// Result can be treated as a [Data] if necessary. +func (r Result[T]) Get() Result[T] { + return r +} + +// Data is a cache that performs an action whose result data will be +// cached. It also returns an "etag" identifier to version the cache, so +// that the caller can know if they have the most recent version of the +// cache (and can decide to cache some operation based on that). +// +// The [NewMerger] and [NewTransformer] automatically handle +// that for you by checking if the etag is updated before calling the +// merging or transforming function. +type Data[T any] interface { + // Returns the cached data, as well as an "etag" to identify the + // version of the cache, or an error if something happened. + Get() Result[T] +} + +// T is the source type, V is the destination type. 
+type merger[K comparable, T, V any] struct { + mergeFn func(map[K]Result[T]) Result[V] + caches map[K]Data[T] + cacheResults map[K]Result[T] + result Result[V] +} + +// NewMerger creates a new merge cache, a cache that merges the result +// of other caches. The function only gets called if any of the +// dependency has changed. +// +// If any of the dependency returned an error before, or any of the +// dependency returned an error this time, or if the mergeFn failed +// before, then the function is reran. +// +// The caches and results are mapped by K so that associated data can be +// retrieved. The map of dependencies can not be modified after +// creation, and a new merger should be created (and probably replaced +// using a [Replaceable]). +// +// Note that this assumes there is no "partial" merge, the merge +// function will remerge all the dependencies together everytime. Since +// the list of dependencies is constant, there is no way to save some +// partial merge information either. +func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Result[V], caches map[K]Data[T]) Data[V] { + return &merger[K, T, V]{ + mergeFn: mergeFn, + caches: caches, + } +} + +func (c *merger[K, T, V]) prepareResults() map[K]Result[T] { + cacheResults := make(map[K]Result[T], len(c.caches)) + for key, cache := range c.caches { + cacheResults[key] = cache.Get() + } + return cacheResults +} + +// Rerun if: +// - The last run resulted in an error +// - Any of the dependency previously returned an error +// - Any of the dependency just returned an error +// - Any of the dependency's etag changed +func (c *merger[K, T, V]) needsRunning(results map[K]Result[T]) bool { + if c.cacheResults == nil { + return true + } + if c.result.Err != nil { + return true + } + if len(results) != len(c.cacheResults) { + panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cacheResults))) + } + for key, oldResult := range c.cacheResults { + newResult, ok := results[key] + if !ok { + panic(fmt.Errorf("unknown cache entry: %v", key)) + } + + if newResult.Etag != oldResult.Etag || newResult.Err != nil || oldResult.Err != nil { + return true + } + } + return false +} + +func (c *merger[K, T, V]) Get() Result[V] { + cacheResults := c.prepareResults() + if c.needsRunning(cacheResults) { + c.cacheResults = cacheResults + c.result = c.mergeFn(c.cacheResults) + } + return c.result +} + +type transformerCacheKeyType struct{} + +// NewTransformer creates a new cache that transforms the result of +// another cache. The transformFn will only be called if the source +// cache has updated the output, otherwise, the cached result will be +// returned. +// +// If the dependency returned an error before, or it returns an error +// this time, or if the transformerFn failed before, the function is +// reran. +func NewTransformer[T, V any](transformerFn func(Result[T]) Result[V], source Data[T]) Data[V] { + return NewMerger(func(caches map[transformerCacheKeyType]Result[T]) Result[V] { + cache, ok := caches[transformerCacheKeyType{}] + if len(caches) != 1 || !ok { + panic(fmt.Errorf("invalid cache for transformer cache: %v", caches)) + } + return transformerFn(cache) + }, map[transformerCacheKeyType]Data[T]{ + {}: source, + }) +} + +// NewSource creates a new cache that generates some data. This +// will always be called since we don't know the origin of the data and +// if it needs to be updated or not. 
+func NewSource[T any](sourceFn func() Result[T]) Data[T] { + c := source[T](sourceFn) + return &c +} + +type source[T any] func() Result[T] + +func (c *source[T]) Get() Result[T] { + return (*c)() +} + +// NewStaticSource creates a new cache that always generates the +// same data. This will only be called once (lazily). +func NewStaticSource[T any](staticFn func() Result[T]) Data[T] { + return &static[T]{ + fn: staticFn, + } +} + +type static[T any] struct { + fn func() Result[T] + result *Result[T] +} + +func (c *static[T]) Get() Result[T] { + if c.result == nil { + result := c.fn() + c.result = &result + } + return *c.result +} + +// Replaceable is a cache that carries the result even when the +// cache is replaced. The cache can be replaced atomically (without any +// lock held). This is the type that should typically be stored in +// structs. +type Replaceable[T any] struct { + cache atomic.Pointer[Data[T]] + result *Result[T] +} + +// Get retrieves the data from the underlying source. [Replaceable] +// implements the [Data] interface itself. This is a pass-through +// that calls the most recent underlying cache. If the cache fails but +// previously had returned a success, that success will be returned +// instead. If the cache fails but we never returned a success, that +// failure is returned. +func (c *Replaceable[T]) Get() Result[T] { + result := (*c.cache.Load()).Get() + if result.Err != nil && c.result != nil && c.result.Err == nil { + return *c.result + } + c.result = &result + return *c.result +} + +// Replace changes the cache in a thread-safe way. +func (c *Replaceable[T]) Replace(cache Data[T]) { + c.cache.Swap(&cache) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go index e8fbcd1d37..66b7a68da6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -24,7 +24,6 @@ import ( "net/http" "net/url" "path" - "sort" "strconv" "strings" "sync" @@ -32,16 +31,18 @@ import ( "github.com/golang/protobuf/proto" openapi_v3 "github.com/google/gnostic/openapiv3" + "github.com/google/uuid" "github.com/munnerz/goautoneg" + "k8s.io/klog/v2" + "k8s.io/kube-openapi/pkg/cached" "k8s.io/kube-openapi/pkg/common" - "k8s.io/kube-openapi/pkg/internal/handler" "k8s.io/kube-openapi/pkg/spec3" - "k8s.io/kube-openapi/pkg/validation/spec" ) const ( - subTypeProtobuf = "com.github.proto-openapi.spec.v3@v1.0+protobuf" - subTypeJSON = "json" + subTypeProtobufDeprecated = "com.github.proto-openapi.spec.v3@v1.0+protobuf" + subTypeProtobuf = "com.github.proto-openapi.spec.v3.v1.0+protobuf" + subTypeJSON = "json" ) // OpenAPIV3Discovery is the format of the Discovery document for OpenAPI V3 @@ -57,23 +58,63 @@ type OpenAPIV3DiscoveryGroupVersion struct { ServerRelativeURL string `json:"serverRelativeURL"` } -// OpenAPIService is the service responsible for serving OpenAPI spec. It has -// the ability to safely change the spec while serving it. -type OpenAPIService struct { - // rwMutex protects All members of this service. - rwMutex sync.RWMutex +func ToV3ProtoBinary(json []byte) ([]byte, error) { + document, err := openapi_v3.ParseDocument(json) + if err != nil { + return nil, err + } + return proto.Marshal(document) +} + +type timedSpec struct { + spec []byte lastModified time.Time - v3Schema map[string]*OpenAPIV3Group } -type OpenAPIV3Group struct { - rwMutex sync.RWMutex +// This type is protected by the lock on OpenAPIService. 
+type openAPIV3Group struct { + specCache cached.Replaceable[*spec3.OpenAPI] + pbCache cached.Data[timedSpec] + jsonCache cached.Data[timedSpec] +} - lastModified time.Time +func newOpenAPIV3Group() *openAPIV3Group { + o := &openAPIV3Group{} + o.jsonCache = cached.NewTransformer[*spec3.OpenAPI](func(result cached.Result[*spec3.OpenAPI]) cached.Result[timedSpec] { + if result.Err != nil { + return cached.NewResultErr[timedSpec](result.Err) + } + json, err := json.Marshal(result.Data) + if err != nil { + return cached.NewResultErr[timedSpec](err) + } + return cached.NewResultOK(timedSpec{spec: json, lastModified: time.Now()}, computeETag(json)) + }, &o.specCache) + o.pbCache = cached.NewTransformer(func(result cached.Result[timedSpec]) cached.Result[timedSpec] { + if result.Err != nil { + return cached.NewResultErr[timedSpec](result.Err) + } + proto, err := ToV3ProtoBinary(result.Data.spec) + if err != nil { + return cached.NewResultErr[timedSpec](err) + } + return cached.NewResultOK(timedSpec{spec: proto, lastModified: result.Data.lastModified}, result.Etag) + }, o.jsonCache) + return o +} - pbCache handler.HandlerCache - jsonCache handler.HandlerCache - etagCache handler.HandlerCache +func (o *openAPIV3Group) UpdateSpec(openapi cached.Data[*spec3.OpenAPI]) { + o.specCache.Replace(openapi) +} + +// OpenAPIService is the service responsible for serving OpenAPI spec. It has +// the ability to safely change the spec while serving it. +type OpenAPIService struct { + // Mutex protects the schema map. + mutex sync.Mutex + v3Schema map[string]*openAPIV3Group + + discoveryCache cached.Replaceable[timedSpec] } func computeETag(data []byte) string { @@ -92,94 +133,90 @@ func constructServerRelativeURL(gvString, etag string) string { } // NewOpenAPIService builds an OpenAPIService starting with the given spec. -func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) { +func NewOpenAPIService() *OpenAPIService { o := &OpenAPIService{} - o.v3Schema = make(map[string]*OpenAPIV3Group) - return o, nil + o.v3Schema = make(map[string]*openAPIV3Group) + // We're not locked because we haven't shared the structure yet. 
+ o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) + return o } -func (o *OpenAPIService) getGroupBytes() ([]byte, error) { - o.rwMutex.RLock() - defer o.rwMutex.RUnlock() - keys := make([]string, len(o.v3Schema)) - i := 0 - for k := range o.v3Schema { - keys[i] = k - i++ +func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Data[timedSpec] { + caches := make(map[string]cached.Data[timedSpec], len(o.v3Schema)) + for gvName, group := range o.v3Schema { + caches[gvName] = group.jsonCache } - - sort.Strings(keys) - discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} - for gvString, groupVersion := range o.v3Schema { - etagBytes, err := groupVersion.etagCache.Get() - if err != nil { - return nil, err + return cached.NewMerger(func(results map[string]cached.Result[timedSpec]) cached.Result[timedSpec] { + discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} + for gvName, result := range results { + if result.Err != nil { + return cached.NewResultErr[timedSpec](result.Err) + } + discovery.Paths[gvName] = OpenAPIV3DiscoveryGroupVersion{ + ServerRelativeURL: constructServerRelativeURL(gvName, result.Etag), + } } - discovery.Paths[gvString] = OpenAPIV3DiscoveryGroupVersion{ - ServerRelativeURL: constructServerRelativeURL(gvString, string(etagBytes)), + j, err := json.Marshal(discovery) + if err != nil { + return cached.NewResultErr[timedSpec](err) } - } - j, err := json.Marshal(discovery) - if err != nil { - return nil, err - } - return j, nil + return cached.NewResultOK(timedSpec{spec: j, lastModified: time.Now()}, computeETag(j)) + }, caches) } func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]byte, string, time.Time, error) { - o.rwMutex.RLock() - defer o.rwMutex.RUnlock() + o.mutex.Lock() + defer o.mutex.Unlock() v, ok := o.v3Schema[group] if !ok { return nil, "", time.Now(), fmt.Errorf("Cannot find CRD group %s", group) } - if getType == subTypeJSON { - specBytes, err := v.jsonCache.Get() - if err != nil { - return nil, "", v.lastModified, err - } - etagBytes, err := v.etagCache.Get() - return specBytes, string(etagBytes), v.lastModified, err - } else if getType == subTypeProtobuf { - specPb, err := v.pbCache.Get() - if err != nil { - return nil, "", v.lastModified, err - } - etagBytes, err := v.etagCache.Get() - return specPb, string(etagBytes), v.lastModified, err + result := cached.Result[timedSpec]{} + switch getType { + case subTypeJSON: + result = v.jsonCache.Get() + case subTypeProtobuf, subTypeProtobufDeprecated: + result = v.pbCache.Get() + default: + return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) } - return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) + return result.Data.spec, result.Etag, result.Data.lastModified, result.Err } -func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) (err error) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - +// UpdateGroupVersionLazy adds or updates an existing group with the new cached. +func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Data[*spec3.OpenAPI]) { + o.mutex.Lock() + defer o.mutex.Unlock() if _, ok := o.v3Schema[group]; !ok { - o.v3Schema[group] = &OpenAPIV3Group{} + o.v3Schema[group] = newOpenAPIV3Group() + // Since there is a new item, we need to re-build the cache map. 
+ o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) } - return o.v3Schema[group].UpdateSpec(openapi) + o.v3Schema[group].UpdateSpec(openapi) } -func (o *OpenAPIService) DeleteGroupVersion(group string) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - delete(o.v3Schema, group) +func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) { + o.UpdateGroupVersionLazy(group, cached.NewResultOK(openapi, uuid.New().String())) } -func ToV3ProtoBinary(json []byte) ([]byte, error) { - document, err := openapi_v3.ParseDocument(json) - if err != nil { - return nil, err - } - return proto.Marshal(document) +func (o *OpenAPIService) DeleteGroupVersion(group string) { + o.mutex.Lock() + defer o.mutex.Unlock() + delete(o.v3Schema, group) + // Rebuild the merge cache map since the items have changed. + o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) } func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { - data, _ := o.getGroupBytes() - w.Header().Set("Etag", strconv.Quote(computeETag(data))) + result := o.discoveryCache.Get() + if result.Err != nil { + klog.Errorf("Error serving discovery: %s", result.Err) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set("Etag", strconv.Quote(result.Etag)) w.Header().Set("Content-Type", "application/json") - http.ServeContent(w, r, "/openapi/v3", time.Now(), bytes.NewReader(data)) + http.ServeContent(w, r, "/openapi/v3", result.Data.lastModified, bytes.NewReader(result.Data.spec)) } func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Request) { @@ -198,11 +235,13 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque } accepted := []struct { - Type string - SubType string + Type string + SubType string + ReturnedContentType string }{ - {"application", subTypeJSON}, - {"application", subTypeProtobuf}, + {"application", subTypeJSON, "application/" + subTypeJSON}, + {"application", subTypeProtobuf, "application/" + subTypeProtobuf}, + {"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf}, } for _, clause := range clauses { @@ -217,6 +256,9 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque if err != nil { return } + // Set Content-Type header in the reponse + w.Header().Set("Content-Type", accepts.ReturnedContentType) + // ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag w.Header().Set("Etag", strconv.Quote(etag)) @@ -250,30 +292,3 @@ func (o *OpenAPIService) RegisterOpenAPIV3VersionedService(servePath string, han handler.HandlePrefix(servePath+"/", http.HandlerFunc(o.HandleGroupVersion)) return nil } - -func (o *OpenAPIV3Group) UpdateSpec(openapi *spec3.OpenAPI) (err error) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - - o.jsonCache = o.jsonCache.New(func() ([]byte, error) { - return json.Marshal(openapi) - }) - o.pbCache = o.pbCache.New(func() ([]byte, error) { - json, err := o.jsonCache.Get() - if err != nil { - return nil, err - } - return ToV3ProtoBinary(json) - }) - // TODO: This forces a json marshal of corresponding group-versions. - // We should look to replace this with a faster hashing mechanism. 
- o.etagCache = o.etagCache.New(func() ([]byte, error) { - json, err := o.jsonCache.Get() - if err != nil { - return nil, err - } - return []byte(computeETag(json)), nil - }) - o.lastModified = time.Now() - return nil -} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go index 3ff3c8d894..bef6037823 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go @@ -18,3 +18,7 @@ package internal // Used by tests to selectively disable experimental JSON unmarshaler var UseOptimizedJSONUnmarshaling bool = true +var UseOptimizedJSONUnmarshalingV3 bool = true + +// Used by tests to selectively disable experimental JSON marshaler +var UseOptimizedJSONMarshaling bool = true diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go b/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go deleted file mode 100644 index e128c26ebe..0000000000 --- a/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package handler - -import ( - "sync" -) - -// HandlerCache represents a lazy cache for generating a byte array -// It is used to lazily marshal OpenAPI v2/v3 and lazily generate the ETag -type HandlerCache struct { - BuildCache func() ([]byte, error) - once sync.Once - bytes []byte - err error -} - -// Get either returns the cached value or calls BuildCache() once before caching and returning -// its results. If BuildCache returns an error, the last valid value for the cache (from prior -// calls to New()) is used instead if possible. -func (c *HandlerCache) Get() ([]byte, error) { - c.once.Do(func() { - bytes, err := c.BuildCache() - // if there is an error updating the cache, there can be situations where - // c.bytes contains a valid value (carried over from the previous update) - // but c.err is also not nil; the cache user is expected to check for this - c.err = err - if c.err == nil { - // don't override previous spec if we had an error - c.bytes = bytes - } - }) - return c.bytes, c.err -} - -// New creates a new HandlerCache for situations where a cache refresh is needed. -// This function is not thread-safe and should not be called at the same time as Get(). -func (c *HandlerCache) New(cacheBuilder func() ([]byte, error)) HandlerCache { - return HandlerCache{ - bytes: c.bytes, - BuildCache: cacheBuilder, - } -} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go new file mode 100644 index 0000000000..7393bacf70 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "github.com/go-openapi/jsonreference" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// DeterministicMarshal calls the jsonv2 library with the deterministic +// flag in order to have stable marshaling. +func DeterministicMarshal(in any) ([]byte, error) { + return jsonv2.MarshalOptions{Deterministic: true}.Marshal(jsonv2.EncodeOptions{}, in) +} + +// JSONRefFromMap populates a json reference object if the map v contains a $ref key. +func JSONRefFromMap(jsonRef *jsonreference.Ref, v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *jsonRef = ref + } + } + return nil +} + +// SanitizeExtensions sanitizes the input map such that non extension +// keys (non x-*, X-*) keys are dropped from the map. Returns the new +// modified map, or nil if the map is now empty. +func SanitizeExtensions(e map[string]interface{}) map[string]interface{} { + for k := range e { + if !IsExtensionKey(k) { + delete(e, k) + } + } + if len(e) == 0 { + e = nil + } + return e +} + +// IsExtensionKey returns true if the input string is of format x-* or X-* +func IsExtensionKey(k string) bool { + return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-' +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go index febde20f9c..e6c6216ff3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go @@ -34,6 +34,13 @@ type MarshalOptions struct { // unknown JSON object members. DiscardUnknownMembers bool + // Deterministic specifies that the same input value will be serialized + // as the exact same output bytes. Different processes of + // the same program will serialize equal values to the same bytes, + // but different versions of the same program are not guaranteed + // to produce the exact same sequence of bytes. + Deterministic bool + // formatDepth is the depth at which we respect the format flag. formatDepth int // format is custom formatting for the value at the specified depth. 
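The new pkg/internal/serialization.go above, together with the Deterministic flag added to MarshalOptions, gives kube-openapi a stable marshaling path: with the flag set, the experimental jsonv2 encoder emits map keys in sorted order (the arshal_any.go and arshal_default.go hunks below implement that sorting), so equal values always serialize to identical bytes and derived ETags stay stable. A minimal sketch of how a caller might use the new helper follows; the packages involved are internal to k8s.io/kube-openapi, so the package name, example function, and example map are assumptions made purely for illustration:

// Hypothetical package assumed to live under k8s.io/kube-openapi/pkg/,
// since pkg/internal cannot be imported from other modules.
package example

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/internal"
)

// stableSpecJSON is a made-up helper showing the new API: DeterministicMarshal
// wraps jsonv2.MarshalOptions{Deterministic: true}, so repeated calls with
// equal input yield byte-identical output.
func stableSpecJSON() error {
	spec := map[string]interface{}{"paths": map[string]interface{}{}, "openapi": "3.0.0"}
	out, err := internal.DeterministicMarshal(spec)
	if err != nil {
		return err
	}
	// Keys appear in sorted order, e.g. {"openapi":"3.0.0","paths":{}}
	fmt.Println(string(out))
	return nil
}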
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go index 204d0648dd..c62b1f3203 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go @@ -62,7 +62,7 @@ func unmarshalValueAny(uo UnmarshalOptions, dec *Decoder) (any, error) { } return dec.stringCache.make(val), nil case '0': - fv, _ := parseFloat(val, 64) // ignore error since readValue gaurantees val is valid + fv, _ := parseFloat(val, 64) // ignore error since readValue guarantees val is valid return fv, nil default: panic("BUG: invalid kind: " + k.String()) @@ -99,13 +99,32 @@ func marshalObjectAny(mo MarshalOptions, enc *Encoder, obj map[string]any) error if !enc.options.AllowInvalidUTF8 { enc.tokens.last.disableNamespace() } - for name, val := range obj { - if err := enc.WriteToken(String(name)); err != nil { - return err + if !mo.Deterministic || len(obj) <= 1 { + for name, val := range obj { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, val); err != nil { + return err + } } - if err := marshalValueAny(mo, enc, val); err != nil { - return err + } else { + names := getStrings(len(obj)) + var i int + for name := range obj { + (*names)[i] = name + i++ + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, obj[name]); err != nil { + return err + } } + putStrings(names) } if err := enc.WriteToken(ObjectEnd); err != nil { return err diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go index fcf3d50000..fd26eba352 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go @@ -5,6 +5,7 @@ package json import ( + "bytes" "encoding/base32" "encoding/base64" "encoding/hex" @@ -12,6 +13,7 @@ import ( "fmt" "math" "reflect" + "sort" "strconv" "sync" ) @@ -228,13 +230,7 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { } } val := enc.UnusedBuffer() - var b []byte - if va.Kind() == reflect.Array { - // TODO(https://go.dev/issue/47066): Avoid reflect.Value.Slice. 
- b = va.Slice(0, va.Len()).Bytes() - } else { - b = va.Bytes() - } + b := va.Bytes() n := len(`"`) + encodedLen(len(b)) + len(`"`) if cap(val) < n { val = make([]byte, n) @@ -248,19 +244,19 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { } unmarshalDefault := fncs.unmarshal fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { - decode, decodedLen := decodeBase64, decodedLenBase64 + decode, decodedLen, encodedLen := decodeBase64, decodedLenBase64, encodedLenBase64 if uo.format != "" && uo.formatDepth == dec.tokens.depth() { switch uo.format { case "base64": - decode, decodedLen = decodeBase64, decodedLenBase64 + decode, decodedLen, encodedLen = decodeBase64, decodedLenBase64, encodedLenBase64 case "base64url": - decode, decodedLen = decodeBase64URL, decodedLenBase64URL + decode, decodedLen, encodedLen = decodeBase64URL, decodedLenBase64URL, encodedLenBase64URL case "base32": - decode, decodedLen = decodeBase32, decodedLenBase32 + decode, decodedLen, encodedLen = decodeBase32, decodedLenBase32, encodedLenBase32 case "base32hex": - decode, decodedLen = decodeBase32Hex, decodedLenBase32Hex + decode, decodedLen, encodedLen = decodeBase32Hex, decodedLenBase32Hex, encodedLenBase32Hex case "base16", "hex": - decode, decodedLen = decodeBase16, decodedLenBase16 + decode, decodedLen, encodedLen = decodeBase16, decodedLenBase16, encodedLenBase16 case "array": uo.format = "" return unmarshalDefault(uo, dec, va) @@ -290,23 +286,28 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { n-- } n = decodedLen(n) - var b []byte + b := va.Bytes() if va.Kind() == reflect.Array { - // TODO(https://go.dev/issue/47066): Avoid reflect.Value.Slice. - b = va.Slice(0, va.Len()).Bytes() if n != len(b) { err := fmt.Errorf("decoded base64 length of %d mismatches array length of %d", n, len(b)) return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } } else { - b = va.Bytes() if b == nil || cap(b) < n { b = make([]byte, n) } else { b = b[:n] } } - if _, err := decode(b, val); err != nil { + n2, err := decode(b, val) + if err == nil && len(val) != encodedLen(n2) { + // TODO(https://go.dev/issue/53845): RFC 4648, section 3.3, + // specifies that non-alphabet characters must be rejected. + // Unfortunately, the "base32" and "base64" packages allow + // '\r' and '\n' characters by default. 
+ err = errors.New("illegal data at input byte " + strconv.Itoa(bytes.IndexAny(val, "\r\n"))) + } + if err != nil { return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } if va.Kind() == reflect.Slice { @@ -412,7 +413,7 @@ func makeUintArshaler(t reflect.Type) *arshaler { return nil } - x := math.Float64frombits(uint64(va.Uint())) + x := math.Float64frombits(va.Uint()) return enc.writeNumber(x, rawUintNumber, mo.StringifyNumbers) } fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { @@ -450,7 +451,7 @@ func makeUintArshaler(t reflect.Type) *arshaler { err := fmt.Errorf("cannot parse %q as unsigned integer: %w", val, strconv.ErrRange) return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } - va.SetUint(uint64(n)) + va.SetUint(n) return nil } return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} @@ -549,23 +550,9 @@ func makeFloatArshaler(t reflect.Type) *arshaler { return &fncs } -var mapIterPool = sync.Pool{ - New: func() any { return new(reflect.MapIter) }, -} - -func getMapIter(mv reflect.Value) *reflect.MapIter { - iter := mapIterPool.Get().(*reflect.MapIter) - iter.Reset(mv) - return iter -} -func putMapIter(iter *reflect.MapIter) { - iter.Reset(reflect.Value{}) // allow underlying map to be garbage collected - mapIterPool.Put(iter) -} - func makeMapArshaler(t reflect.Type) *arshaler { // NOTE: The logic below disables namespaces for tracking duplicate names - // when handling map keys with a unique represention. + // when handling map keys with a unique representation. // NOTE: Values retrieved from a map are not addressable, // so we shallow copy the values to make them addressable and @@ -641,24 +628,76 @@ func makeMapArshaler(t reflect.Type) *arshaler { enc.tokens.last.disableNamespace() } - // NOTE: Map entries are serialized in a non-deterministic order. - // Users that need stable output should call RawValue.Canonicalize. - // TODO(go1.19): Remove use of a sync.Pool with reflect.MapIter. - // Calling reflect.Value.MapRange no longer allocates. - // See https://go.dev/cl/400675. - iter := getMapIter(va.Value) - defer putMapIter(iter) - for iter.Next() { - k.SetIterKey(iter) - if err := marshalKey(mko, enc, k); err != nil { - // TODO: If err is errMissingName, then wrap it as a - // SemanticError since this key type cannot be serialized - // as a JSON string. - return err + switch { + case !mo.Deterministic || n <= 1: + for iter := va.Value.MapRange(); iter.Next(); { + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + v.SetIterValue(iter) + if err := marshalVal(mo, enc, v); err != nil { + return err + } } - v.SetIterValue(iter) - if err := marshalVal(mo, enc, v); err != nil { - return err + case !nonDefaultKey && t.Key().Kind() == reflect.String: + names := getStrings(n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + k.SetIterKey(iter) + (*names)[i] = k.String() + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. 
+ k.SetString(name) + v.Set(va.MapIndex(k.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + putStrings(names) + default: + type member struct { + name string // unquoted name + key addressableValue + } + members := make([]member, n) + keys := reflect.MakeSlice(reflect.SliceOf(t.Key()), n, n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + // Marshal the member name. + k := addressableValue{keys.Index(i)} // indexed slice element is always addressable + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + name := enc.unwriteOnlyObjectMemberName() + members[i] = member{name, k} + } + // TODO: If AllowDuplicateNames is enabled, then sort according + // to reflect.Value as well if the names are equal. + // See internal/fmtsort. + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Slice(members, func(i, j int) bool { + return lessUTF16(members[i].name, members[j].name) + }) + for _, member := range members { + if err := enc.WriteToken(String(member.name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. + v.Set(va.MapIndex(member.key.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } } } } @@ -856,7 +895,7 @@ func makeStructArshaler(t reflect.Type) *arshaler { // 2. The object namespace is guaranteed to be disabled. // 3. The object name is guaranteed to be valid and pre-escaped. // 4. There is no need to flush the buffer (for unwrite purposes). - // 5. There is no possibility of an error occuring. + // 5. There is no possibility of an error occurring. if optimizeCommon { // Append any delimiters or optional whitespace. if enc.tokens.last.length() > 0 { @@ -996,7 +1035,7 @@ func makeStructArshaler(t reflect.Type) *arshaler { if fields.inlinedFallback == nil { // Skip unknown value since we have no place to store it. 
- if err := dec.skipValue(); err != nil { + if err := dec.SkipValue(); err != nil { return err } } else { diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go index 7476eda301..258a98247d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go @@ -5,6 +5,7 @@ package json import ( + "bytes" "errors" "reflect" ) @@ -89,35 +90,61 @@ func marshalInlinedFallbackAll(mo MarshalOptions, enc *Encoder, va addressableVa } return nil } else { - if v.Len() == 0 { + m := v // must be a map[string]V + n := m.Len() + if n == 0 { return nil } - m := v + mk := newAddressableValue(stringType) mv := newAddressableValue(m.Type().Elem()) - for iter := m.MapRange(); iter.Next(); { - b, err := appendString(enc.UnusedBuffer(), iter.Key().String(), !enc.options.AllowInvalidUTF8, nil) + marshalKey := func(mk addressableValue) error { + b, err := appendString(enc.UnusedBuffer(), mk.String(), !enc.options.AllowInvalidUTF8, nil) if err != nil { return err } if insertUnquotedName != nil { - isVerbatim := consumeSimpleString(b) == len(b) + isVerbatim := bytes.IndexByte(b, '\\') < 0 name := unescapeStringMayCopy(b, isVerbatim) if !insertUnquotedName(name) { return &SyntacticError{str: "duplicate name " + string(b) + " in object"} } } - if err := enc.WriteValue(b); err != nil { - return err + return enc.WriteValue(b) + } + marshalVal := f.fncs.marshal + if mo.Marshalers != nil { + marshalVal, _ = mo.Marshalers.lookup(marshalVal, mv.Type()) + } + if !mo.Deterministic || n <= 1 { + for iter := m.MapRange(); iter.Next(); { + mk.SetIterKey(iter) + if err := marshalKey(mk); err != nil { + return err + } + mv.Set(iter.Value()) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } } - - mv.Set(iter.Value()) - marshal := f.fncs.marshal - if mo.Marshalers != nil { - marshal, _ = mo.Marshalers.lookup(marshal, mv.Type()) + } else { + names := getStrings(n) + for i, iter := 0, m.Value.MapRange(); i < n && iter.Next(); i++ { + mk.SetIterKey(iter) + (*names)[i] = mk.String() } - if err := marshal(mo, enc, mv); err != nil { - return err + names.Sort() + for _, name := range *names { + mk.SetString(name) + if err := marshalKey(mk); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use mv.SetMapIndexOf. + mv.Set(m.MapIndex(mk.Value)) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } } + putStrings(names) } return nil } @@ -162,7 +189,7 @@ func unmarshalInlinedFallbackNext(uo UnmarshalOptions, dec *Decoder, va addressa } else { name := string(unquotedName) // TODO: Intern this? - m := v + m := v // must be a map[string]V if m.IsNil() { m.Set(reflect.MakeMap(m.Type())) } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go index ef4e1f5e3d..20899c868c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go @@ -21,8 +21,8 @@ var ( ) // MarshalerV1 is implemented by types that can marshal themselves. 
-// It is recommended that types implement MarshalerV2 unless -// the implementation is trying to avoid a hard dependency on this package. +// It is recommended that types implement MarshalerV2 unless the implementation +// is trying to avoid a hard dependency on the "jsontext" package. // // It is recommended that implementations return a buffer that is safe // for the caller to retain and potentially mutate. diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go index 22e802221e..fc8d5b0070 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go @@ -5,6 +5,7 @@ package json import ( + "errors" "fmt" "reflect" "strings" @@ -85,25 +86,39 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { fncs.nonDefault = true fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { format := time.RFC3339Nano + isRFC3339 := true if mo.format != "" && mo.formatDepth == enc.tokens.depth() { var err error - format, err = checkTimeFormat(mo.format) + format, isRFC3339, err = checkTimeFormat(mo.format) if err != nil { return &SemanticError{action: "marshal", GoType: t, Err: err} } } tt := va.Interface().(time.Time) - if y := tt.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See https://go.dev/issue/4556#c15 for more discussion. - err := fmt.Errorf("year %d outside of range [0,9999]", y) - return &SemanticError{action: "marshal", GoType: t, Err: err} - } b := enc.UnusedBuffer() b = append(b, '"') b = tt.AppendFormat(b, format) b = append(b, '"') + if isRFC3339 { + // Not all Go timestamps can be represented as valid RFC 3339. + // Explicitly check for these edge cases. + // See https://go.dev/issue/4556 and https://go.dev/issue/54580. + var err error + switch b := b[len(`"`) : len(b)-len(`"`)]; { + case b[len("9999")] != '-': // year must be exactly 4 digits wide + err = errors.New("year outside of range [0,9999]") + case b[len(b)-1] != 'Z': + c := b[len(b)-len("Z07:00")] + if ('0' <= c && c <= '9') || parseDec2(b[len(b)-len("07:00"):]) >= 24 { + err = errors.New("timezone hour outside of range [0,23]") + } + } + if err != nil { + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return enc.WriteValue(b) // RFC 3339 never needs JSON escaping + } // The format may contain special characters that need escaping. // Verify that the result is a valid JSON string (common case), // otherwise escape the string correctly (slower case). 
@@ -113,10 +128,11 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { return enc.WriteValue(b) } fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { - format := time.RFC3339Nano + format := time.RFC3339 + isRFC3339 := true if uo.format != "" && uo.formatDepth == dec.tokens.depth() { var err error - format, err = checkTimeFormat(uo.format) + format, isRFC3339, err = checkTimeFormat(uo.format) if err != nil { return &SemanticError{action: "unmarshal", GoType: t, Err: err} } @@ -136,6 +152,29 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { case '"': val = unescapeStringMayCopy(val, flags.isVerbatim()) tt2, err := time.Parse(format, string(val)) + if isRFC3339 && err == nil { + // TODO(https://go.dev/issue/54580): RFC 3339 specifies + // the exact grammar of a valid timestamp. However, + // the parsing functionality in "time" is too loose and + // incorrectly accepts invalid timestamps as valid. + // Remove these manual checks when "time" checks it for us. + newParseError := func(layout, value, layoutElem, valueElem, message string) error { + return &time.ParseError{Layout: layout, Value: value, LayoutElem: layoutElem, ValueElem: valueElem, Message: message} + } + switch { + case val[len("2006-01-02T")+1] == ':': // hour must be two digits + err = newParseError(format, string(val), "15", string(val[len("2006-01-02T"):][:1]), "") + case val[len("2006-01-02T15:04:05")] == ',': // sub-second separator must be a period + err = newParseError(format, string(val), ".", ",", "") + case val[len(val)-1] != 'Z': + switch { + case parseDec2(val[len(val)-len("07:00"):]) >= 24: // timezone hour must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone hour out of range") + case parseDec2(val[len(val)-len("00"):]) >= 60: // timezone minute must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone minute out of range") + } + } + } if err != nil { return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } @@ -149,48 +188,54 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { return fncs } -func checkTimeFormat(format string) (string, error) { +func checkTimeFormat(format string) (string, bool, error) { // We assume that an exported constant in the time package will // always start with an uppercase ASCII letter. 
if len(format) > 0 && 'A' <= format[0] && format[0] <= 'Z' { switch format { case "ANSIC": - return time.ANSIC, nil + return time.ANSIC, false, nil case "UnixDate": - return time.UnixDate, nil + return time.UnixDate, false, nil case "RubyDate": - return time.RubyDate, nil + return time.RubyDate, false, nil case "RFC822": - return time.RFC822, nil + return time.RFC822, false, nil case "RFC822Z": - return time.RFC822Z, nil + return time.RFC822Z, false, nil case "RFC850": - return time.RFC850, nil + return time.RFC850, false, nil case "RFC1123": - return time.RFC1123, nil + return time.RFC1123, false, nil case "RFC1123Z": - return time.RFC1123Z, nil + return time.RFC1123Z, false, nil case "RFC3339": - return time.RFC3339, nil + return time.RFC3339, true, nil case "RFC3339Nano": - return time.RFC3339Nano, nil + return time.RFC3339Nano, true, nil case "Kitchen": - return time.Kitchen, nil + return time.Kitchen, false, nil case "Stamp": - return time.Stamp, nil + return time.Stamp, false, nil case "StampMilli": - return time.StampMilli, nil + return time.StampMilli, false, nil case "StampMicro": - return time.StampMicro, nil + return time.StampMicro, false, nil case "StampNano": - return time.StampNano, nil + return time.StampNano, false, nil default: // Reject any format that is an exported Go identifier in case // new format constants are added to the time package. if strings.TrimFunc(format, isLetterOrDigit) == "" { - return "", fmt.Errorf("undefined format layout: %v", format) + return "", false, fmt.Errorf("undefined format layout: %v", format) } } } - return format, nil + return format, false, nil +} + +// parseDec2 parses b as an unsigned, base-10, 2-digit number. +// It panics if len(b) < 2. The result is undefined if digits are not base-10. +func parseDec2(b []byte) byte { + return 10*(b[0]-'0') + (b[1] - '0') } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go index 998ad68fc0..0d68b32338 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go @@ -347,9 +347,9 @@ func (d *Decoder) PeekKind() Kind { return next } -// skipValue is semantically equivalent to calling ReadValue and discarding +// SkipValue is semantically equivalent to calling ReadValue and discarding // the result except that memory is not wasted trying to hold the entire result. -func (d *Decoder) skipValue() error { +func (d *Decoder) SkipValue() error { switch d.PeekKind() { case '{', '[': // For JSON objects and arrays, keep skipping all tokens @@ -374,7 +374,7 @@ func (d *Decoder) skipValue() error { } // ReadToken reads the next Token, advancing the read offset. -// The returned token is only valid until the next Peek or Read call. +// The returned token is only valid until the next Peek, Read, or Skip call. // It returns io.EOF if there are no more tokens. func (d *Decoder) ReadToken() (Token, error) { // Determine the next kind. @@ -585,7 +585,7 @@ func (f valueFlags) isCanonical() bool { return f&stringNonCanonical == 0 } // ReadValue returns the next raw JSON value, advancing the read offset. // The value is stripped of any leading or trailing whitespace. 
-// The returned value is only valid until the next Peek or Read call and +// The returned value is only valid until the next Peek, Read, or Skip call and // may not be mutated while the Decoder remains in use. // If the decoder is currently at the end token for an object or array, // then it reports a SyntacticError and the internal state remains unchanged. @@ -1013,7 +1013,7 @@ func (d *Decoder) InputOffset() int64 { // UnreadBuffer returns the data remaining in the unread buffer, // which may contain zero or more bytes. // The returned buffer must not be mutated while Decoder continues to be used. -// The buffer contents are valid until the next Peek or Read call. +// The buffer contents are valid until the next Peek, Read, or Skip call. func (d *Decoder) UnreadBuffer() []byte { return d.unreadBuffer() } @@ -1213,7 +1213,7 @@ func consumeStringResumable(flags *valueFlags, b []byte, resumeOffset int, valid return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+6])) + " within string"} } // Only certain control characters can use the \uFFFF notation - // for canonical formating (per RFC 8785, section 3.2.2.2.). + // for canonical formatting (per RFC 8785, section 3.2.2.2.). switch v1 { // \uFFFF notation not permitted for these characters. case '\b', '\f', '\n', '\r', '\t': diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go index 5f98a8409e..5b81ca15af 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go @@ -347,6 +347,30 @@ func (e *Encoder) unwriteEmptyObjectMember(prevName *string) bool { return true } +// unwriteOnlyObjectMemberName unwrites the only object member name +// and returns the unquoted name. +func (e *Encoder) unwriteOnlyObjectMemberName() string { + if last := e.tokens.last; !last.isObject() || last.length() != 1 { + panic("BUG: must be called on an object after writing first name") + } + + // Unwrite the name and whitespace. + b := trimSuffixString(e.buf) + isVerbatim := bytes.IndexByte(e.buf[len(b):], '\\') < 0 + name := string(unescapeStringMayCopy(e.buf[len(b):], isVerbatim)) + e.buf = trimSuffixWhitespace(b) + + // Undo state changes. + e.tokens.last.decrement() + if !e.options.AllowDuplicateNames { + if e.tokens.last.isActiveNamespace() { + e.namespaces.last().removeLast() + } + e.names.clearLast() + } + return name +} + func trimSuffixWhitespace(b []byte) []byte { // NOTE: The arguments and logic are kept simple to keep this inlineable. n := len(b) - 1 diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go index f722822117..60e93270fb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go @@ -8,6 +8,7 @@ import ( "bytes" "io" "math/bits" + "sort" "sync" ) @@ -148,3 +149,34 @@ func putStreamingDecoder(d *Decoder) { streamingDecoderPool.Put(d) } } + +var stringsPools = &sync.Pool{New: func() any { return new(stringSlice) }} + +type stringSlice []string + +// getStrings returns a non-nil pointer to a slice with length n. 
+func getStrings(n int) *stringSlice { + s := stringsPools.Get().(*stringSlice) + if cap(*s) < n { + *s = make([]string, n) + } + *s = (*s)[:n] + return s +} + +func putStrings(s *stringSlice) { + if cap(*s) > 1<<10 { + *s = nil // avoid pinning arbitrarily large amounts of memory + } + stringsPools.Put(s) +} + +// Sort sorts the string slice according to RFC 8785, section 3.2.3. +func (ss *stringSlice) Sort() { + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Sort(ss) +} + +func (ss *stringSlice) Len() int { return len(*ss) } +func (ss *stringSlice) Less(i, j int) bool { return lessUTF16((*ss)[i], (*ss)[j]) } +func (ss *stringSlice) Swap(i, j int) { (*ss)[i], (*ss)[j] = (*ss)[j], (*ss)[i] } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go index d9c33f2b4b..ee14c753fe 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go @@ -721,7 +721,7 @@ func (s *uintSet) has(i uint) bool { return s.lo.has(i) } else { i -= 64 - iHi, iLo := int(i/64), uint(i%64) + iHi, iLo := int(i/64), i%64 return iHi < len(s.hi) && s.hi[iHi].has(iLo) } } @@ -735,7 +735,7 @@ func (s *uintSet) insert(i uint) bool { return !has } else { i -= 64 - iHi, iLo := int(i/64), uint(i%64) + iHi, iLo := int(i/64), i%64 if iHi >= len(s.hi) { s.hi = append(s.hi, make([]uintSet64, iHi+1-len(s.hi))...) s.hi = s.hi[:cap(s.hi)] diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go index 08509c296b..9acba7dadf 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go @@ -112,7 +112,7 @@ func Bool(b bool) Token { return False } -// String construct a Token representing a JSON string. +// String constructs a Token representing a JSON string. // The provided string should contain valid UTF-8, otherwise invalid characters // may be mangled as the Unicode replacement character. func String(s string) Token { @@ -225,7 +225,7 @@ func (t Token) appendString(dst []byte, validateUTF8, preserveRaw bool, escapeRu } // String returns the unescaped string value for a JSON string. -// For other JSON kinds, this returns the raw JSON represention. +// For other JSON kinds, this returns the raw JSON representation. func (t Token) String() string { // This is inlinable to take advantage of "function outlining". // This avoids an allocation for the string(b) conversion @@ -373,10 +373,10 @@ func (t Token) Int() int64 { case 'i': return int64(t.num) case 'u': - if uint64(t.num) > maxInt64 { + if t.num > maxInt64 { return maxInt64 } - return int64(uint64(t.num)) + return int64(t.num) } } @@ -425,7 +425,7 @@ func (t Token) Uint() uint64 { // Handle exact integer value. 
switch t.str[0] { case 'u': - return uint64(t.num) + return t.num case 'i': if int64(t.num) < minUint64 { return minUint64 diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go index fe88e4fb5e..e0bd1b31d7 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go @@ -263,7 +263,7 @@ func reorderObjects(d *Decoder, scratch *[]byte) { afterValue := d.InputOffset() if isSorted && len(*members) > 0 { - isSorted = lessUTF16(prevName, name) + isSorted = lessUTF16(prevName, []byte(name)) } *members = append(*members, memberName{name, beforeName, afterValue}) prevName = name @@ -317,7 +317,7 @@ func reorderObjects(d *Decoder, scratch *[]byte) { // to the UTF-16 codepoints of the UTF-8 encoded input strings. // This implements the ordering specified in RFC 8785, section 3.2.3. // The inputs must be valid UTF-8, otherwise this may panic. -func lessUTF16(x, y []byte) bool { +func lessUTF16[Bytes []byte | string](x, y Bytes) bool { // NOTE: This is an optimized, allocation-free implementation // of lessUTF16Simple in fuzz_test.go. FuzzLessUTF16 verifies that the // two implementations agree on the result of comparing any two strings. @@ -326,8 +326,13 @@ func lessUTF16(x, y []byte) bool { return ('\u0000' <= r && r <= '\uD7FF') || ('\uE000' <= r && r <= '\uFFFF') } + var invalidUTF8 bool + x0, y0 := x, y for { if len(x) == 0 || len(y) == 0 { + if len(x) == len(y) && invalidUTF8 { + return string(x0) < string(y0) + } return len(x) < len(y) } @@ -341,35 +346,36 @@ func lessUTF16(x, y []byte) bool { } // Decode next pair of runes as UTF-8. - rx, nx := utf8.DecodeRune(x) - ry, ny := utf8.DecodeRune(y) - switch { - - // Both runes encode as either a single or surrogate pair - // of UTF-16 codepoints. - case isUTF16Self(rx) == isUTF16Self(ry): - if rx != ry { - return rx < ry - } + // TODO(https://go.dev/issue/56948): Use a generic implementation + // of utf8.DecodeRune, or rely on a compiler optimization to statically + // hide the cost of a type switch (https://go.dev/issue/57072). + var rx, ry rune + var nx, ny int + switch any(x).(type) { + case string: + rx, nx = utf8.DecodeRuneInString(string(x)) + ry, ny = utf8.DecodeRuneInString(string(y)) + case []byte: + rx, nx = utf8.DecodeRune([]byte(x)) + ry, ny = utf8.DecodeRune([]byte(y)) + } + selfx := isUTF16Self(rx) + selfy := isUTF16Self(ry) + switch { // The x rune is a single UTF-16 codepoint, while // the y rune is a surrogate pair of UTF-16 codepoints. - case isUTF16Self(rx): - ry, _ := utf16.EncodeRune(ry) - if rx != ry { - return rx < ry - } - panic("BUG: invalid UTF-8") // implies rx is an unpaired surrogate half - + case selfx && !selfy: + ry, _ = utf16.EncodeRune(ry) // The y rune is a single UTF-16 codepoint, while // the x rune is a surrogate pair of UTF-16 codepoints. 
- case isUTF16Self(ry): - rx, _ := utf16.EncodeRune(rx) - if rx != ry { - return rx < ry - } - panic("BUG: invalid UTF-8") // implies ry is an unpaired surrogate half + case selfy && !selfx: + rx, _ = utf16.EncodeRune(rx) + } + if rx != ry { + return rx < ry } + invalidUTF8 = invalidUTF8 || (rx == utf8.RuneError && nx == 1) || (ry == utf8.RuneError && ny == 1) x, y = x[nx:], y[ny:] } } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go index 2e2f3d76f3..699291f1d8 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go @@ -18,7 +18,10 @@ package spec3 import ( "encoding/json" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -41,6 +44,9 @@ func (e *Encoding) MarshalJSON() ([]byte, error) { } func (e *Encoding) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.EncodingProps); err != nil { return err } @@ -50,6 +56,20 @@ func (e *Encoding) UnmarshalJSON(data []byte) error { return nil } +func (e *Encoding) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + EncodingProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.EncodingProps = x.EncodingProps + return nil +} + type EncodingProps struct { // Content Type for encoding a specific property ContentType string `json:"contentType,omitempty"` diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go index 84e21d7232..03b8727170 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -20,6 +20,9 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -49,6 +52,9 @@ func (e *Example) MarshalJSON() ([]byte, error) { } func (e *Example) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.Refable); err != nil { return err } @@ -61,6 +67,23 @@ func (e *Example) UnmarshalJSON(data []byte) error { return nil } +func (e *Example) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ExampleProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&e.Ref.Ref, x.Extensions); err != nil { + return err + } + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.ExampleProps = x.ExampleProps + + return nil +} + type ExampleProps struct { // Summary holds a short description of the example Summary string `json:"summary,omitempty"` diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go index 065f4887bf..e79956721a 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -18,7 +18,10 @@ package spec3 import ( "encoding/json" + "github.com/go-openapi/swag" + 
"k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -48,6 +51,9 @@ func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { } func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.ExternalDocumentationProps); err != nil { return err } @@ -56,3 +62,16 @@ func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { } return nil } + +func (e *ExternalDocumentation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ExternalDocumentationProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.ExternalDocumentationProps = x.ExternalDocumentationProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go new file mode 100644 index 0000000000..bc19dd48ed --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go @@ -0,0 +1,254 @@ +package spec3 + +import ( + "math/rand" + "strings" + + fuzz "github.com/google/gofuzz" + + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// refChance is the chance that a particular component will use a $ref +// instead of fuzzed. Expressed as a fraction 1/n, currently there is +// a 1/3 chance that a ref will be used. +const refChance = 3 + +const alphaNumChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func randAlphanumString() string { + arr := make([]string, rand.Intn(10)+5) + for i := 0; i < len(arr); i++ { + arr[i] = string(alphaNumChars[rand.Intn(len(alphaNumChars))]) + } + return strings.Join(arr, "") +} + +var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ + func(s *string, c fuzz.Continue) { + // All OpenAPI V3 map keys must follow the corresponding + // regex. Note that this restricts the range for all other + // string values as well. 
+ str := randAlphanumString() + *s = str + }, + func(o *OpenAPI, c fuzz.Continue) { + c.FuzzNoCustom(o) + o.Version = "3.0.0" + }, + func(r *interface{}, c fuzz.Continue) { + switch c.Intn(3) { + case 0: + *r = nil + case 1: + n := c.RandString() + "x" + *r = n + case 2: + n := c.Float64() + *r = n + } + }, + func(v **spec.Info, c fuzz.Continue) { + // Info is never nil + *v = &spec.Info{} + c.FuzzNoCustom(*v) + (*v).Title = c.RandString() + "x" + }, + func(v *Paths, c fuzz.Continue) { + c.Fuzz(&v.VendorExtensible) + num := c.Intn(5) + if num > 0 { + v.Paths = make(map[string]*Path) + } + for i := 0; i < num; i++ { + val := Path{} + c.Fuzz(&val) + v.Paths["/"+c.RandString()] = &val + } + }, + func(v *SecurityScheme, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + switch c.Intn(4) { + case 0: + v.Type = "apiKey" + v.Name = c.RandString() + "x" + switch c.Intn(3) { + case 0: + v.In = "query" + case 1: + v.In = "header" + case 2: + v.In = "cookie" + } + case 1: + v.Type = "http" + case 2: + v.Type = "oauth2" + v.Flows = make(map[string]*OAuthFlow) + flow := OAuthFlow{} + flow.AuthorizationUrl = c.RandString() + "x" + v.Flows["implicit"] = &flow + flow.Scopes = make(map[string]string) + flow.Scopes["foo"] = "bar" + case 3: + v.Type = "openIdConnect" + v.OpenIdConnectUrl = "https://" + c.RandString() + } + v.Scheme = "basic" + }, + func(v *spec.Ref, c fuzz.Continue) { + switch c.Intn(7) { + case 0: + *v = spec.MustCreateRef("#/components/schemas/" + randAlphanumString()) + case 1: + *v = spec.MustCreateRef("#/components/responses/" + randAlphanumString()) + case 2: + *v = spec.MustCreateRef("#/components/headers/" + randAlphanumString()) + case 3: + *v = spec.MustCreateRef("#/components/securitySchemes/" + randAlphanumString()) + case 5: + *v = spec.MustCreateRef("#/components/parameters/" + randAlphanumString()) + case 6: + *v = spec.MustCreateRef("#/components/requestBodies/" + randAlphanumString()) + } + }, + func(v *Parameter, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.ParameterProps) + c.Fuzz(&v.VendorExtensible) + + switch c.Intn(3) { + case 0: + // Header param + v.In = "query" + case 1: + v.In = "header" + case 2: + v.In = "cookie" + } + }, + func(v *RequestBody, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.RequestBodyProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *Header, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.HeaderProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *ResponsesProps, c fuzz.Continue) { + c.Fuzz(&v.Default) + n := c.Intn(5) + for i := 0; i < n; i++ { + r2 := Response{} + c.Fuzz(&r2) + // HTTP Status code in 100-599 Range + code := c.Intn(500) + 100 + v.StatusCodeResponses = make(map[int]*Response) + v.StatusCodeResponses[code] = &r2 + } + }, + func(v *Response, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.ResponseProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *spec.Extensions, c fuzz.Continue) { + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + if *v == nil { + *v = spec.Extensions{} + } + (*v)["x-"+c.RandString()] = c.RandString() + } + }, + func(v *spec.ExternalDocumentation, c fuzz.Continue) { + c.Fuzz(&v.Description) + v.URL = "https://" + randAlphanumString() + }, + func(v *spec.SchemaURL, c fuzz.Continue) { + *v = spec.SchemaURL("https://" + randAlphanumString()) + }, + func(v *spec.SchemaOrBool, c 
fuzz.Continue) { + *v = spec.SchemaOrBool{} + + if c.RandBool() { + v.Allows = c.RandBool() + } else { + v.Schema = &spec.Schema{} + v.Allows = true + c.Fuzz(&v.Schema) + } + }, + func(v *spec.SchemaOrArray, c fuzz.Continue) { + *v = spec.SchemaOrArray{} + if c.RandBool() { + schema := spec.Schema{} + c.Fuzz(&schema) + v.Schema = &schema + } else { + v.Schemas = []spec.Schema{} + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + schema := spec.Schema{} + c.Fuzz(&schema) + v.Schemas = append(v.Schemas, schema) + } + + } + + }, + func(v *spec.SchemaOrStringArray, c fuzz.Continue) { + if c.RandBool() { + *v = spec.SchemaOrStringArray{} + if c.RandBool() { + c.Fuzz(&v.Property) + } else { + c.Fuzz(&v.Schema) + } + } + }, + func(v *spec.Schema, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Ref) + return + } + if c.RandBool() { + // file schema + c.Fuzz(&v.Default) + c.Fuzz(&v.Description) + c.Fuzz(&v.Example) + c.Fuzz(&v.ExternalDocs) + + c.Fuzz(&v.Format) + c.Fuzz(&v.ReadOnly) + c.Fuzz(&v.Required) + c.Fuzz(&v.Title) + v.Type = spec.StringOrArray{"file"} + + } else { + // normal schema + c.Fuzz(&v.SchemaProps) + c.Fuzz(&v.SwaggerSchemaProps) + c.Fuzz(&v.VendorExtensible) + c.Fuzz(&v.ExtraProps) + } + + }, +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go index cead4b15d1..ee5a30f797 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -50,6 +52,9 @@ func (h *Header) MarshalJSON() ([]byte, error) { } func (h *Header) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, h) + } if err := json.Unmarshal(data, &h.Refable); err != nil { return err } @@ -63,6 +68,22 @@ func (h *Header) UnmarshalJSON(data []byte) error { return nil } +func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + HeaderProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&h.Ref.Ref, x.Extensions); err != nil { + return err + } + h.Extensions = internal.SanitizeExtensions(x.Extensions) + h.HeaderProps = x.HeaderProps + return nil +} + // HeaderProps a struct that describes a header object type HeaderProps struct { // Description holds a brief description of the parameter diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go index d502a465c3..d390e69bcf 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -18,7 +18,10 @@ package spec3 import ( "encoding/json" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -44,6 +47,9 @@ func (m *MediaType) MarshalJSON() ([]byte, error) { } func (m *MediaType) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, m) + } if err := json.Unmarshal(data, &m.MediaTypeProps); err != nil { return err } @@ -53,6 +59,20 @@ func (m *MediaType) UnmarshalJSON(data []byte) error { return nil } 
+func (m *MediaType) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + MediaTypeProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + m.Extensions = internal.SanitizeExtensions(x.Extensions) + m.MediaTypeProps = x.MediaTypeProps + + return nil +} + // MediaTypeProps a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject type MediaTypeProps struct { // Schema holds the schema defining the type used for the media type diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go index 09ce7eaf12..28230610bd 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -46,12 +48,28 @@ func (o *Operation) MarshalJSON() ([]byte, error) { // UnmarshalJSON hydrates this items instance with the data from JSON func (o *Operation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, o) + } if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err } return json.Unmarshal(data, &o.VendorExtensible) } +func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + OperationProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + o.Extensions = internal.SanitizeExtensions(x.Extensions) + o.OperationProps = x.OperationProps + return nil +} + // OperationProps describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject type OperationProps struct { // Tags holds a list of tags for API documentation control diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go index 0d7180e506..613da71a6d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -50,6 +52,10 @@ func (p *Parameter) MarshalJSON() ([]byte, error) { } func (p *Parameter) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } + if err := json.Unmarshal(data, &p.Refable); err != nil { return err } @@ -63,6 +69,22 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { return nil } +func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ParameterProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.ParameterProps = x.ParameterProps + return nil +} + // ParameterProps a struct that describes a single operation parameter, more at 
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject type ParameterProps struct { // Name holds the name of the parameter diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go index 4a0cae2a4c..40d9061ace 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -18,9 +18,12 @@ package spec3 import ( "encoding/json" + "fmt" "strings" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -45,6 +48,9 @@ func (p *Paths) MarshalJSON() ([]byte, error) { // UnmarshalJSON hydrates this items instance with the data from JSON func (p *Paths) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { return err @@ -74,6 +80,59 @@ func (p *Paths) UnmarshalJSON(data []byte) error { return nil } +func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + tok, err := dec.ReadToken() + if err != nil { + return err + } + switch k := tok.Kind(); k { + case 'n': + *p = Paths{} + return nil + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + + if tok.Kind() == '}' { + return nil + } + + switch k := tok.String(); { + case internal.IsExtensionKey(k): + var ext any + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if p.Extensions == nil { + p.Extensions = make(map[string]any) + } + p.Extensions[k] = ext + case len(k) > 0 && k[0] == '/': + pi := Path{} + if err := opts.UnmarshalNext(dec, &pi); err != nil { + return err + } + + if p.Paths == nil { + p.Paths = make(map[string]*Path) + } + p.Paths[k] = &pi + default: + _, err := dec.ReadValue() // skip value + if err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + // Path describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject // // Note that this struct is actually a thin wrapper around PathProps to make it referable and extensible @@ -101,6 +160,9 @@ func (p *Path) MarshalJSON() ([]byte, error) { } func (p *Path) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } if err := json.Unmarshal(data, &p.Refable); err != nil { return err } @@ -113,6 +175,24 @@ func (p *Path) UnmarshalJSON(data []byte) error { return nil } +func (p *Path) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + PathProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.PathProps = x.PathProps + + return nil +} + // PathProps describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject type PathProps struct { // Summary holds a summary for all operations in this path diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go index 
c00c043bd0..33267ce675 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -50,6 +52,9 @@ func (r *RequestBody) MarshalJSON() ([]byte, error) { } func (r *RequestBody) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -71,3 +76,19 @@ type RequestBodyProps struct { // Required determines if the request body is required in the request Required bool `json:"required,omitempty"` } + +func (r *RequestBody) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + RequestBodyProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.RequestBodyProps = x.RequestBodyProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go index 9be7665608..95b388e6c6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -18,9 +18,12 @@ package spec3 import ( "encoding/json" + "fmt" "strconv" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -46,13 +49,15 @@ func (r *Responses) MarshalJSON() ([]byte, error) { } func (r *Responses) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { return err } if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { return err } - return nil } @@ -78,25 +83,91 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]*Response + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { - return nil + return err } if v, ok := res["default"]; ok { - r.Default = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.Default = &value delete(res, "default") } for k, v := range res { + // Take all integral keys if nk, err := strconv.Atoi(k); err == nil { if r.StatusCodeResponses == nil { r.StatusCodeResponses = map[int]*Response{} } - r.StatusCodeResponses[nk] = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.StatusCodeResponses[nk] = &value } } return nil } +func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) (err error) { + tok, err := dec.ReadToken() + if err != nil { + return err + } + switch k := tok.Kind(); k { + case 'n': + *r = Responses{} + return nil + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + if tok.Kind() == '}' { + return nil + } + switch k := tok.String(); { + case 
internal.IsExtensionKey(k): + var ext any + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if r.Extensions == nil { + r.Extensions = make(map[string]any) + } + r.Extensions[k] = ext + case k == "default": + resp := Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + r.ResponsesProps.Default = &resp + default: + if nk, err := strconv.Atoi(k); err == nil { + resp := Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]*Response{} + } + r.StatusCodeResponses[nk] = &resp + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + // Response describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject // // Note that this struct is actually a thin wrapper around ResponseProps to make it referable and extensible @@ -124,6 +195,9 @@ func (r *Response) MarshalJSON() ([]byte, error) { } func (r *Response) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -133,7 +207,22 @@ func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { return err } + return nil +} +func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ResponseProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.ResponseProps = x.ResponseProps return nil } @@ -174,6 +263,9 @@ func (r *Link) MarshalJSON() ([]byte, error) { } func (r *Link) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -187,6 +279,22 @@ func (r *Link) UnmarshalJSON(data []byte) error { return nil } +func (l *Link) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + LinkProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&l.Ref.Ref, x.Extensions); err != nil { + return err + } + l.Extensions = internal.SanitizeExtensions(x.Extensions) + l.LinkProps = x.LinkProps + return nil +} + // LinkProps describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject type LinkProps struct { // OperationId is the name of an existing, resolvable OAS operation diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go index 77104dff37..d5df0a7811 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go @@ -18,7 +18,10 @@ package spec3 import ( "encoding/json" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -50,6 +53,10 @@ func (s *Server) MarshalJSON() ([]byte, error) { } func (s *Server) UnmarshalJSON(data []byte) error { + if 
internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, s) + } + if err := json.Unmarshal(data, &s.ServerProps); err != nil { return err } @@ -59,6 +66,20 @@ func (s *Server) UnmarshalJSON(data []byte) error { return nil } +func (s *Server) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ServerProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.ServerProps = x.ServerProps + + return nil +} + type ServerVariable struct { ServerVariableProps spec.VendorExtensible @@ -87,6 +108,9 @@ func (s *ServerVariable) MarshalJSON() ([]byte, error) { } func (s *ServerVariable) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, s) + } if err := json.Unmarshal(data, &s.ServerVariableProps); err != nil { return err } @@ -95,3 +119,17 @@ func (s *ServerVariable) UnmarshalJSON(data []byte) error { } return nil } + +func (s *ServerVariable) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ServerVariableProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.ServerVariableProps = x.ServerVariableProps + + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go index 3ff48a3c3d..bed096fb76 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go @@ -17,6 +17,10 @@ limitations under the License. package spec3 import ( + "encoding/json" + + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -35,3 +39,12 @@ type OpenAPI struct { // ExternalDocs holds additional external documentation ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` } + +func (o *OpenAPI) UnmarshalJSON(data []byte) error { + type OpenAPIWithNoFunctions OpenAPI + p := (*OpenAPIWithNoFunctions)(o) + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, &p) + } + return json.Unmarshal(data, &p) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go index 9a2556306a..05310c46b3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go @@ -43,6 +43,9 @@ type Header struct { // MarshalJSON marshal this to JSON func (h Header) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(h) + } b1, err := json.Marshal(h.CommonValidations) if err != nil { return nil, err @@ -62,6 +65,20 @@ func (h Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3, b4), nil } +func (h Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Extensions + HeaderProps + } + x.CommonValidations = commonValidationsOmitZero(h.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(h.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(h.Extensions) + x.HeaderProps = h.HeaderProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON unmarshals 
this header from JSON func (h *Header) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -94,12 +111,8 @@ func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Dec h.CommonValidations = x.CommonValidations h.SimpleSchema = x.SimpleSchema - h.Extensions = x.Extensions + h.Extensions = internal.SanitizeExtensions(x.Extensions) h.HeaderProps = x.HeaderProps - h.Extensions.sanitize() - if len(h.Extensions) == 0 { - h.Extensions = nil - } return nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go index 395ececae8..d667b705be 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go @@ -89,17 +89,9 @@ func (e Extensions) GetObject(key string, out interface{}) error { return nil } -func (e Extensions) sanitize() { - for k := range e { - if !isExtensionKey(k) { - delete(e, k) - } - } -} - func (e Extensions) sanitizeWithExtra() (extra map[string]any) { for k, v := range e { - if !isExtensionKey(k) { + if !internal.IsExtensionKey(k) { if extra == nil { extra = make(map[string]any) } @@ -110,10 +102,6 @@ func (e Extensions) sanitizeWithExtra() (extra map[string]any) { return extra } -func isExtensionKey(k string) bool { - return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-' -} - // VendorExtensible composition block. type VendorExtensible struct { Extensions Extensions @@ -181,6 +169,9 @@ type Info struct { // MarshalJSON marshal this to JSON func (i Info) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } b1, err := json.Marshal(i.InfoProps) if err != nil { return nil, err @@ -192,6 +183,16 @@ func (i Info) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (i Info) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + InfoProps + } + x.Extensions = i.Extensions + x.InfoProps = i.InfoProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (i *Info) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -212,11 +213,7 @@ func (i *Info) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decod if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - i.VendorExtensible.Extensions = x.Extensions + i.Extensions = internal.SanitizeExtensions(x.Extensions) i.InfoProps = x.InfoProps return nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go index 374f90d28d..4132467d24 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go @@ -37,6 +37,18 @@ type SimpleSchema struct { Example interface{} `json:"example,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). 
+type simpleSchemaOmitZero struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitzero"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + // CommonValidations describe common JSON-schema validations type CommonValidations struct { Maximum *float64 `json:"maximum,omitempty"` @@ -53,6 +65,23 @@ type CommonValidations struct { Enum []interface{} `json:"enum,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type commonValidationsOmitZero struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + // Items a limited subset of JSON-Schema's items object. // It is used by parameter definitions that are not located in "body". // @@ -105,18 +134,18 @@ func (i *Items) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco if err := i.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } + i.CommonValidations = x.CommonValidations i.SimpleSchema = x.SimpleSchema - i.VendorExtensible.Extensions = x.Extensions + i.Extensions = internal.SanitizeExtensions(x.Extensions) return nil } // MarshalJSON converts this items object to JSON func (i Items) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } b1, err := json.Marshal(i.CommonValidations) if err != nil { return nil, err @@ -135,3 +164,17 @@ func (i Items) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b4, b3, b1, b2), nil } + +func (i Items) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(i.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(i.SimpleSchema) + x.Ref = i.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(i.Extensions) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go index 923769ae08..63eed34601 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go @@ -42,6 +42,23 @@ type OperationProps struct { Responses *Responses `json:"responses,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). 
+type operationPropsOmitZero struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty,omitzero"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitzero"` +} + // MarshalJSON takes care of serializing operation properties to JSON // // We use a custom marhaller here to handle a special cases related to @@ -96,17 +113,16 @@ func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2. if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - o.VendorExtensible.Extensions = x.Extensions + o.Extensions = internal.SanitizeExtensions(x.Extensions) o.OperationProps = OperationProps(x.OperationPropsNoMethods) return nil } // MarshalJSON converts this items object to JSON func (o Operation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(o) + } b1, err := json.Marshal(o.OperationProps) if err != nil { return nil, err @@ -118,3 +134,13 @@ func (o Operation) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b1, b2) return concated, nil } + +func (o Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + OperationProps operationPropsOmitZero `json:",inline"` + } + x.Extensions = internal.SanitizeExtensions(o.Extensions) + x.OperationProps = operationPropsOmitZero(o.OperationProps) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go index 7cb229ac13..53d1e0aa94 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go @@ -36,6 +36,17 @@ type ParamProps struct { AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type paramPropsOmitZero struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitzero"` + Schema *Schema `json:"schema,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` +} + // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. @@ -109,19 +120,18 @@ func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2. 
if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } p.CommonValidations = x.CommonValidations p.SimpleSchema = x.SimpleSchema - p.VendorExtensible.Extensions = x.Extensions + p.Extensions = internal.SanitizeExtensions(x.Extensions) p.ParamProps = x.ParamProps return nil } // MarshalJSON converts this items object to JSON func (p Parameter) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.CommonValidations) if err != nil { return nil, err @@ -144,3 +154,19 @@ func (p Parameter) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b3, b1, b2, b4, b5), nil } + +func (p Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + ParamProps paramPropsOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(p.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(p.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.ParamProps = paramPropsOmitZero(p.ParamProps) + x.Ref = p.Refable.Ref.String() + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go index 03741fcfb5..1d1588cb92 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go @@ -70,24 +70,20 @@ func (p *PathItem) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.D if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - - p.Extensions = x.Extensions - p.PathItemProps = x.PathItemProps - - if err := p.Refable.Ref.fromMap(p.Extensions); err != nil { + if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - - p.Extensions.sanitize() - if len(p.Extensions) == 0 { - p.Extensions = nil - } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.PathItemProps = x.PathItemProps return nil } // MarshalJSON converts this items object to JSON func (p PathItem) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b3, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -103,3 +99,15 @@ func (p PathItem) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b3, b4, b5) return concated, nil } + +func (p PathItem) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + PathItemProps + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.PathItemProps = p.PathItemProps + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go index 6699fc4c6e..18f6a9f429 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go @@ -92,7 +92,7 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco } switch k := tok.String(); { - case isExtensionKey(k): + case internal.IsExtensionKey(k): ext = nil if err := opts.UnmarshalNext(dec, &ext); err != nil { return 
err @@ -126,6 +126,9 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco // MarshalJSON converts this items object to JSON func (p Paths) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.VendorExtensible) if err != nil { return nil, err @@ -144,3 +147,18 @@ func (p Paths) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b1, b2) return concated, nil } + +func (p Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + m := make(map[string]any, len(p.Extensions)+len(p.Paths)) + for k, v := range p.Extensions { + if internal.IsExtensionKey(k) { + m[k] = v + } + } + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + m[k] = v + } + } + return opts.MarshalNext(enc, m) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go index 1405bfd8ee..775b3b0c36 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go @@ -21,6 +21,8 @@ import ( "path/filepath" "github.com/go-openapi/jsonreference" + + "k8s.io/kube-openapi/pkg/internal" ) // Refable is a struct for things that accept a $ref property @@ -149,19 +151,5 @@ func (r *Ref) UnmarshalJSON(d []byte) error { } func (r *Ref) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil + return internal.JSONRefFromMap(&r.Ref, v) } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go index f01364b75c..3ff1fe1322 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go @@ -30,6 +30,15 @@ type ResponseProps struct { Examples map[string]interface{} `json:"examples,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type responsePropsOmitZero struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitzero"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + // Response describes a single response from an API Operation. 
// // For more information: http://goo.gl/8us55a#responseObject @@ -68,23 +77,20 @@ func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.D return err } - r.Extensions = x.Extensions - r.ResponseProps = x.ResponseProps - - if err := r.Refable.Ref.fromMap(r.Extensions); err != nil { + if err := r.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - - r.Extensions.sanitize() - if len(r.Extensions) == 0 { - r.Extensions = nil - } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.ResponseProps = x.ResponseProps return nil } // MarshalJSON converts this items object to JSON func (r Response) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponseProps) if err != nil { return nil, err @@ -100,6 +106,18 @@ func (r Response) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + ResponseProps responsePropsOmitZero `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.ResponseProps = responsePropsOmitZero(r.ResponseProps) + return opts.MarshalNext(enc, x) +} + // NewResponse creates a new response instance func NewResponse() *Response { return new(Response) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go index c3fa68191d..d9ad760a43 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go @@ -63,6 +63,9 @@ func (r *Responses) UnmarshalJSON(data []byte) error { // MarshalJSON converts this items object to JSON func (r Responses) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponsesProps) if err != nil { return nil, err @@ -75,6 +78,25 @@ func (r Responses) MarshalJSON() ([]byte, error) { return concated, nil } +func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + Default *Response `json:"default,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) + for k, v := range r.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range r.StatusCodeResponses { + x.ArbitraryKeys[strconv.Itoa(k)] = v + } + x.Default = r.Default + return opts.MarshalNext(enc, x) +} + // ResponsesProps describes all responses for an operation. // It tells what is the default response and maps all responses with a // HTTP status code. @@ -148,7 +170,7 @@ func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2. 
return nil } switch k := tok.String(); { - case isExtensionKey(k): + case internal.IsExtensionKey(k): ext = nil if err := opts.UnmarshalNext(dec, &ext); err != nil { return err diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go index 9add0c163d..dfbb2e05cb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go @@ -196,6 +196,46 @@ type SchemaProps struct { Definitions Definitions `json:"definitions,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type schemaPropsOmitZero struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitzero"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitzero"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitzero"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitzero"` + Properties map[string]Schema `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitzero"` + PatternProperties map[string]Schema `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitzero"` + Definitions Definitions `json:"definitions,omitempty"` +} + // SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) type SwaggerSchemaProps struct { Discriminator string `json:"discriminator,omitempty"` @@ -204,6 +244,15 @@ type SwaggerSchemaProps struct { Example interface{} `json:"example,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type swaggerSchemaPropsOmitZero struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitzero"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + Example interface{} `json:"example,omitempty"` +} + // Schema the schema object allows the definition of input and output data types. // These types can be objects, but also primitives and arrays. 
// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) @@ -434,6 +483,9 @@ func (s *Schema) WithExternalDocs(description, url string) *Schema { // MarshalJSON marshal this to JSON func (s Schema) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SchemaProps) if err != nil { return nil, fmt.Errorf("schema props %v", err) @@ -465,6 +517,31 @@ func (s Schema) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil } +func (s Schema) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + SchemaProps schemaPropsOmitZero `json:",inline"` + SwaggerSchemaProps swaggerSchemaPropsOmitZero `json:",inline"` + Schema string `json:"$schema,omitempty"` + Ref string `json:"$ref,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(s.Extensions)+len(s.ExtraProps)) + for k, v := range s.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range s.ExtraProps { + x.ArbitraryKeys[k] = v + } + x.SchemaProps = schemaPropsOmitZero(s.SchemaProps) + x.SwaggerSchemaProps = swaggerSchemaPropsOmitZero(s.SwaggerSchemaProps) + x.Ref = s.Ref.String() + x.Schema = string(s.Schema) + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (s *Schema) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -547,7 +624,7 @@ func (s *Schema) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Dec } s.ExtraProps = x.Extensions.sanitizeWithExtra() - s.VendorExtensible.Extensions = x.Extensions + s.Extensions = internal.SanitizeExtensions(x.Extensions) s.SchemaProps = x.SchemaProps s.SwaggerSchemaProps = x.SwaggerSchemaProps return nil diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go index 34723fb715..e2b7da14cf 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go @@ -18,6 +18,7 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) @@ -45,6 +46,9 @@ type SecurityScheme struct { // MarshalJSON marshal this to JSON func (s SecurityScheme) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SecuritySchemeProps) if err != nil { return nil, err @@ -56,6 +60,16 @@ func (s SecurityScheme) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (s SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SecuritySchemeProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SecuritySchemeProps = s.SecuritySchemeProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { @@ -72,11 +86,7 @@ func (s *SecurityScheme) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *js if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - 
s.VendorExtensible.Extensions = x.Extensions + s.Extensions = internal.SanitizeExtensions(x.Extensions) s.SecuritySchemeProps = x.SecuritySchemeProps return nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go index f6cb7da3f2..c8f3beaa35 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go @@ -35,6 +35,9 @@ type Swagger struct { // MarshalJSON marshals this swagger structure to json func (s Swagger) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SwaggerProps) if err != nil { return nil, err @@ -46,12 +49,22 @@ func (s Swagger) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SwaggerProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SwaggerProps = s.SwaggerProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON unmarshals a swagger spec from json func (s *Swagger) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { return jsonv2.Unmarshal(data, s) } - var sw Swagger if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { return err @@ -75,15 +88,8 @@ func (s *Swagger) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.De if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - - s.Extensions = x.Extensions + s.Extensions = internal.SanitizeExtensions(x.Extensions) s.SwaggerProps = x.SwaggerProps - - s.Extensions.sanitize() - if len(s.Extensions) == 0 { - s.Extensions = nil - } - return nil } @@ -126,6 +132,9 @@ var jsFalse = []byte("false") // MarshalJSON convert this object to JSON func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } if s.Schema != nil { return json.Marshal(s.Schema) } @@ -136,6 +145,18 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { return jsTrue, nil } +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + + if s.Schema == nil && !s.Allows { + return enc.WriteToken(jsonv2.False) + } + return enc.WriteToken(jsonv2.True) +} + // UnmarshalJSON converts this bool or schema object from a JSON structure func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -143,15 +164,15 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { } var nw SchemaOrBool - if len(data) >= 4 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch + if len(data) > 0 && data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + nw.Schema = &sch + nw.Allows = true + } else { + json.Unmarshal(data, &nw.Allows) } *s = nw return nil @@ -185,6 +206,9 @@ type SchemaOrStringArray struct { // MarshalJSON converts this schema object or array into JSON structure func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if 
internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } if len(s.Property) > 0 { return json.Marshal(s.Property) } @@ -194,6 +218,17 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { return []byte("null"), nil } +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if len(s.Property) > 0 { + return opts.MarshalNext(enc, s.Property) + } + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + return enc.WriteToken(jsonv2.Null) +} + // UnmarshalJSON converts this schema object or array from a JSON structure func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -347,12 +382,23 @@ func (s *SchemaOrArray) ContainsType(name string) bool { // MarshalJSON converts this schema object or array into JSON structure func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } + if s.Schemas != nil { return json.Marshal(s.Schemas) } return json.Marshal(s.Schema) } +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if s.Schemas != nil { + return opts.MarshalNext(enc, s.Schemas) + } + return opts.MarshalNext(enc, s.Schema) +} + // UnmarshalJSON converts this schema object or array from a JSON structure func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go index 69e93b60bd..d105d52ca4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go @@ -41,6 +41,9 @@ type Tag struct { // MarshalJSON marshal this to JSON func (t Tag) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(t) + } b1, err := json.Marshal(t.TagProps) if err != nil { return nil, err @@ -52,6 +55,16 @@ func (t Tag) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (t Tag) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + TagProps + } + x.Extensions = internal.SanitizeExtensions(t.Extensions) + x.TagProps = t.TagProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (t *Tag) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -72,11 +85,7 @@ func (t *Tag) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decode if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - t.VendorExtensible.Extensions = x.Extensions + t.Extensions = internal.SanitizeExtensions(x.Extensions) t.TagProps = x.TagProps return nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 015bda27a9..585bb5d680 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# cloud.google.com/go/compute v1.18.0 +# cloud.google.com/go/compute v1.19.0 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 @@ -178,7 +178,7 @@ github.com/aliyun/credentials-go/credentials/utils # github.com/asaskevich/govalidator 
v0.0.0-20210307081110-f21760c49a8d ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go-v2 v1.17.3 +# github.com/aws/aws-sdk-go-v2 v1.17.8 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2 github.com/aws/aws-sdk-go-v2/aws @@ -199,10 +199,10 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/config v1.18.8 +# github.com/aws/aws-sdk-go-v2/config v1.18.21 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.13.8 +# github.com/aws/aws-sdk-go-v2/credentials v1.13.20 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -211,17 +211,17 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.2 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.32 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.26 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.33 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/ini # github.com/aws/aws-sdk-go-v2/service/ecr v1.17.18 @@ -234,20 +234,20 @@ github.com/aws/aws-sdk-go-v2/service/ecr/types github.com/aws/aws-sdk-go-v2/service/ecrpublic github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ecrpublic/types -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.26 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 +# github.com/aws/aws-sdk-go-v2/service/sso v1.12.8 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.8 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 +# github.com/aws/aws-sdk-go-v2/service/sts v1.18.9 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints @@ -319,8 +319,8 @@ github.com/cockroachdb/apd/v2 # github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be ## explicit github.com/common-nighthawk/go-figure -# github.com/containerd/stargz-snapshotter/estargz v0.12.1 -## explicit; go 1.16 +# github.com/containerd/stargz-snapshotter/estargz v0.14.3 +## explicit; go 1.19 
github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/coreos/go-oidc/v3 v3.5.0 @@ -341,7 +341,7 @@ github.com/digitorus/timestamp # github.com/dimchansky/utfbom v1.1.1 ## explicit github.com/dimchansky/utfbom -# github.com/docker/cli v20.10.21+incompatible +# github.com/docker/cli v23.0.1+incompatible ## explicit github.com/docker/cli/cli/config github.com/docker/cli/cli/config/configfile @@ -350,7 +350,7 @@ github.com/docker/cli/cli/config/types # github.com/docker/distribution v2.8.1+incompatible ## explicit github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v20.10.24+incompatible +# github.com/docker/docker v23.0.1+incompatible ## explicit github.com/docker/docker/pkg/homedir # github.com/docker/docker-credential-helpers v0.7.0 @@ -385,6 +385,7 @@ github.com/go-chi/chi/middleware github.com/go-jose/go-jose/v3 github.com/go-jose/go-jose/v3/cipher github.com/go-jose/go-jose/v3/json +github.com/go-jose/go-jose/v3/jwt # github.com/go-kit/log v0.2.1 ## explicit; go 1.17 github.com/go-kit/log @@ -526,7 +527,7 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/go-containerregistry v0.13.1-0.20230203223142-b3c23b4c3f28 +# github.com/google/go-containerregistry v0.14.0 ## explicit; go 1.18 github.com/google/go-containerregistry/internal/and github.com/google/go-containerregistry/internal/compression @@ -640,8 +641,8 @@ github.com/json-iterator/go # github.com/kelseyhightower/envconfig v1.4.0 ## explicit github.com/kelseyhightower/envconfig -# github.com/klauspost/compress v1.15.11 -## explicit; go 1.17 +# github.com/klauspost/compress v1.16.0 +## explicit; go 1.18 github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -875,7 +876,7 @@ github.com/rogpeppe/go-internal/semver github.com/sassoftware/relic/lib/pkcs7 github.com/sassoftware/relic/lib/x509tools github.com/sassoftware/relic/signers/sigerrors -# github.com/secure-systems-lab/go-securesystemslib v0.4.0 +# github.com/secure-systems-lab/go-securesystemslib v0.5.0 ## explicit; go 1.17 github.com/secure-systems-lab/go-securesystemslib/cjson github.com/secure-systems-lab/go-securesystemslib/dsse @@ -963,7 +964,7 @@ github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1 github.com/sigstore/rekor/pkg/types/intoto github.com/sigstore/rekor/pkg/types/intoto/v0.0.1 github.com/sigstore/rekor/pkg/util -# github.com/sigstore/sigstore v1.5.1 +# github.com/sigstore/sigstore v1.6.2 ## explicit; go 1.18 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/fulcioroots @@ -1012,7 +1013,7 @@ github.com/spf13/viper/internal/encoding/javaproperties github.com/spf13/viper/internal/encoding/json github.com/spf13/viper/internal/encoding/toml github.com/spf13/viper/internal/encoding/yaml -# github.com/spiffe/go-spiffe/v2 v2.1.2 +# github.com/spiffe/go-spiffe/v2 v2.1.4 ## explicit; go 1.17 github.com/spiffe/go-spiffe/v2/bundle/jwtbundle github.com/spiffe/go-spiffe/v2/bundle/spiffebundle @@ -1050,8 +1051,8 @@ github.com/syndtr/goleveldb/leveldb/util # github.com/tchap/go-patricia/v2 v2.3.1 ## explicit; go 1.16 github.com/tchap/go-patricia/v2/patricia -# github.com/tektoncd/pipeline v0.45.0 -## explicit; go 1.18 +# github.com/tektoncd/pipeline v0.47.0 +## explicit; go 1.19 github.com/tektoncd/pipeline/pkg/apis/config 
github.com/tektoncd/pipeline/pkg/apis/pipeline github.com/tektoncd/pipeline/pkg/apis/pipeline/pod @@ -1070,6 +1071,7 @@ github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alp github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1 github.com/tektoncd/pipeline/pkg/list github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag +github.com/tektoncd/pipeline/pkg/result github.com/tektoncd/pipeline/pkg/spire/config github.com/tektoncd/pipeline/pkg/substitution github.com/tektoncd/pipeline/test/diff @@ -1167,7 +1169,7 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/otel v1.13.0 +# go.opentelemetry.io/otel v1.14.0 ## explicit; go 1.18 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1180,7 +1182,7 @@ go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/internal go.opentelemetry.io/otel/semconv/v1.12.0 -# go.opentelemetry.io/otel/trace v1.13.0 +# go.opentelemetry.io/otel/trace v1.14.0 ## explicit; go 1.18 go.opentelemetry.io/otel/trace # go.step.sm/crypto v0.25.0 @@ -1248,9 +1250,10 @@ golang.org/x/crypto/sha3 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/terminal -# golang.org/x/exp v0.0.0-20220823124025-807a23277127 +# golang.org/x/exp v0.0.0-20230307190834-24139beb5833 ## explicit; go 1.18 golang.org/x/exp/constraints +golang.org/x/exp/maps golang.org/x/exp/slices # golang.org/x/mod v0.10.0 ## explicit; go 1.17 @@ -1271,7 +1274,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.6.0 +# golang.org/x/oauth2 v0.7.0 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1316,13 +1319,14 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.6.0 +# golang.org/x/tools v0.7.0 ## explicit; go 1.18 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages +golang.org/x/tools/go/types/objectpath golang.org/x/tools/imports golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core @@ -1341,20 +1345,19 @@ golang.org/x/tools/internal/typesinternal # gomodules.xyz/jsonpatch/v2 v2.2.0 ## explicit; go 1.12 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.110.0 +# google.golang.org/api v0.116.0 ## explicit; go 1.19 google.golang.org/api/googleapi/transport google.golang.org/api/idtoken google.golang.org/api/impersonate google.golang.org/api/internal +google.golang.org/api/internal/cert google.golang.org/api/internal/impersonate google.golang.org/api/option google.golang.org/api/option/internaloption google.golang.org/api/support/bundler -google.golang.org/api/transport/cert google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -google.golang.org/api/transport/internal/dca # google.golang.org/appengine v1.6.7 ## explicit; go 1.11 google.golang.org/appengine @@ -1367,7 +1370,7 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 +# google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633 ## 
explicit; go 1.19 google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/rpc/status @@ -1549,7 +1552,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.26.1 => k8s.io/apimachinery v0.25.5 +# k8s.io/apimachinery v0.26.4 => k8s.io/apimachinery v0.25.5 ## explicit; go 1.19 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1922,7 +1925,7 @@ k8s.io/gengo/generator k8s.io/gengo/namer k8s.io/gengo/parser k8s.io/gengo/types -# k8s.io/klog/v2 v2.90.0 +# k8s.io/klog/v2 v2.90.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -1930,13 +1933,13 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kube-openapi v0.0.0-20230123231816-1cb3ae25d79a +# k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a ## explicit; go 1.18 k8s.io/kube-openapi/pkg/builder3/util +k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/handler3 k8s.io/kube-openapi/pkg/internal -k8s.io/kube-openapi/pkg/internal/handler k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json k8s.io/kube-openapi/pkg/openapiconv k8s.io/kube-openapi/pkg/schemaconv @@ -1944,7 +1947,7 @@ k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20230115233650-391b47cb4029 +# k8s.io/utils v0.0.0-20230209194617-a36077c30491 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock