From ae2c97924496a699963c44020caebd52336bf143 Mon Sep 17 00:00:00 2001
From: Tiffany Wang
Date: Wed, 23 Oct 2024 04:09:55 -0700
Subject: [PATCH] make gogenerate-aws-sdk

---
 aws-sdk-go-v2/service/ecs/api_client.go | 926 +
 aws-sdk-go-v2/service/ecs/api_client_test.go | 126 +
 .../ecs/api_op_CreateCapacityProvider.go | 193 +
 .../service/ecs/api_op_CreateCluster.go | 253 +
 .../service/ecs/api_op_CreateService.go | 562 +
 .../service/ecs/api_op_CreateTaskSet.go | 276 +
 .../ecs/api_op_DeleteAccountSetting.go | 164 +
 .../service/ecs/api_op_DeleteAttributes.go | 158 +
 .../ecs/api_op_DeleteCapacityProvider.go | 168 +
 .../service/ecs/api_op_DeleteCluster.go | 160 +
 .../service/ecs/api_op_DeleteService.go | 179 +
 .../ecs/api_op_DeleteTaskDefinitions.go | 182 +
 .../service/ecs/api_op_DeleteTaskSet.go | 169 +
 .../ecs/api_op_DeregisterContainerInstance.go | 188 +
 .../ecs/api_op_DeregisterTaskDefinition.go | 172 +
 .../ecs/api_op_DescribeCapacityProviders.go | 179 +
 .../service/ecs/api_op_DescribeClusters.go | 166 +
 .../ecs/api_op_DescribeContainerInstances.go | 169 +
 .../service/ecs/api_op_DescribeServices.go | 626 +
 .../ecs/api_op_DescribeTaskDefinition.go | 190 +
 .../service/ecs/api_op_DescribeTaskSets.go | 172 +
 .../service/ecs/api_op_DescribeTasks.go | 593 +
 .../ecs/api_op_DiscoverPollEndpoint.go | 170 +
 .../service/ecs/api_op_ExecuteCommand.go | 196 +
 .../service/ecs/api_op_GetTaskProtection.go | 165 +
 .../service/ecs/api_op_ListAccountSettings.go | 285 +
 .../service/ecs/api_op_ListAttributes.go | 290 +
 .../service/ecs/api_op_ListClusters.go | 264 +
 .../ecs/api_op_ListContainerInstances.go | 294 +
 .../service/ecs/api_op_ListServices.go | 277 +
 .../ecs/api_op_ListServicesByNamespace.go | 291 +
 .../service/ecs/api_op_ListTagsForResource.go | 152 +
 .../ecs/api_op_ListTaskDefinitionFamilies.go | 291 +
 .../service/ecs/api_op_ListTaskDefinitions.go | 291 +
 aws-sdk-go-v2/service/ecs/api_op_ListTasks.go | 309 +
 .../service/ecs/api_op_PutAccountSetting.go | 258 +
 .../ecs/api_op_PutAccountSettingDefault.go | 244 +
 .../service/ecs/api_op_PutAttributes.go | 163 +
 .../ecs/api_op_PutClusterCapacityProviders.go | 210 +
 .../ecs/api_op_RegisterContainerInstance.go | 211 +
 .../ecs/api_op_RegisterTaskDefinition.go | 447 +
 aws-sdk-go-v2/service/ecs/api_op_RunTask.go | 424 +
 aws-sdk-go-v2/service/ecs/api_op_StartTask.go | 270 +
 aws-sdk-go-v2/service/ecs/api_op_StopTask.go | 180 +
 .../api_op_SubmitAttachmentStateChanges.go | 157 +
 .../ecs/api_op_SubmitContainerStateChange.go | 170 +
 .../ecs/api_op_SubmitTaskStateChange.go | 180 +
 .../service/ecs/api_op_TagResource.go | 179 +
 .../service/ecs/api_op_UntagResource.go | 152 +
 .../ecs/api_op_UpdateCapacityProvider.go | 156 +
 .../service/ecs/api_op_UpdateCluster.go | 174 +
 .../ecs/api_op_UpdateClusterSettings.go | 166 +
 .../ecs/api_op_UpdateContainerAgent.go | 177 +
 .../api_op_UpdateContainerInstancesState.go | 218 +
 .../service/ecs/api_op_UpdateService.go | 461 +
 .../ecs/api_op_UpdateServicePrimaryTaskSet.go | 169 +
 .../ecs/api_op_UpdateTaskProtection.go | 211 +
 .../service/ecs/api_op_UpdateTaskSet.go | 172 +
 aws-sdk-go-v2/service/ecs/auth.go | 317 +
 aws-sdk-go-v2/service/ecs/deserializers.go | 20256 ++++++++++++++++
 aws-sdk-go-v2/service/ecs/doc.go | 26 +
 aws-sdk-go-v2/service/ecs/endpoints.go | 532 +
 .../service/ecs/endpoints_config_test.go | 140 +
 aws-sdk-go-v2/service/ecs/endpoints_test.go | 1859 ++
 aws-sdk-go-v2/service/ecs/generated.json | 89 +
 aws-sdk-go-v2/service/ecs/go.mod | 11 +
 aws-sdk-go-v2/service/ecs/go.sum | 20 +
 .../service/ecs/go_module_metadata.go | 6 +
 .../ecs/internal/endpoints/endpoints.go | 542 +
 .../ecs/internal/endpoints/endpoints_test.go | 12 +
 aws-sdk-go-v2/service/ecs/options.go | 237 +
 aws-sdk-go-v2/service/ecs/protocol_test.go | 6 +
 aws-sdk-go-v2/service/ecs/serializers.go | 8226 +++++++
 aws-sdk-go-v2/service/ecs/snapshot_test.go | 1406 ++
 aws-sdk-go-v2/service/ecs/types/enums.go | 1297 +
 aws-sdk-go-v2/service/ecs/types/errors.go | 763 +
 aws-sdk-go-v2/service/ecs/types/types.go | 5597 +++++
 aws-sdk-go-v2/service/ecs/validators.go | 3266 +++
 78 files changed, 59003 insertions(+)
 create mode 100644 aws-sdk-go-v2/service/ecs/api_client.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_client_test.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_CreateCapacityProvider.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_CreateCluster.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_CreateService.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_CreateTaskSet.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DeleteAccountSetting.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DeleteAttributes.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DeleteCapacityProvider.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DeleteCluster.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DeleteService.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DeleteTaskDefinitions.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DeleteTaskSet.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DeregisterContainerInstance.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DeregisterTaskDefinition.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DescribeCapacityProviders.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DescribeClusters.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DescribeContainerInstances.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DescribeServices.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DescribeTaskDefinition.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DescribeTaskSets.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DescribeTasks.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_DiscoverPollEndpoint.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_ExecuteCommand.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_GetTaskProtection.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_ListAccountSettings.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_ListAttributes.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_ListClusters.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_ListContainerInstances.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_ListServices.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_ListServicesByNamespace.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_ListTagsForResource.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitionFamilies.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitions.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_ListTasks.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_PutAccountSetting.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_PutAccountSettingDefault.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_PutAttributes.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_PutClusterCapacityProviders.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_RegisterContainerInstance.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_RegisterTaskDefinition.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_RunTask.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_StartTask.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_StopTask.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_SubmitAttachmentStateChanges.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_SubmitContainerStateChange.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_SubmitTaskStateChange.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_TagResource.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_UntagResource.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_UpdateCapacityProvider.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_UpdateCluster.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_UpdateClusterSettings.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_UpdateContainerAgent.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_UpdateContainerInstancesState.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_UpdateService.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_UpdateServicePrimaryTaskSet.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_UpdateTaskProtection.go
 create mode 100644 aws-sdk-go-v2/service/ecs/api_op_UpdateTaskSet.go
 create mode 100644 aws-sdk-go-v2/service/ecs/auth.go
 create mode 100644 aws-sdk-go-v2/service/ecs/deserializers.go
 create mode 100644 aws-sdk-go-v2/service/ecs/doc.go
 create mode 100644 aws-sdk-go-v2/service/ecs/endpoints.go
 create mode 100644 aws-sdk-go-v2/service/ecs/endpoints_config_test.go
 create mode 100644 aws-sdk-go-v2/service/ecs/endpoints_test.go
 create mode 100644 aws-sdk-go-v2/service/ecs/generated.json
 create mode 100644 aws-sdk-go-v2/service/ecs/go.mod
 create mode 100644 aws-sdk-go-v2/service/ecs/go.sum
 create mode 100644 aws-sdk-go-v2/service/ecs/go_module_metadata.go
 create mode 100644 aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints.go
 create mode 100644 aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints_test.go
 create mode 100644 aws-sdk-go-v2/service/ecs/options.go
 create mode 100644 aws-sdk-go-v2/service/ecs/protocol_test.go
 create mode 100644 aws-sdk-go-v2/service/ecs/serializers.go
 create mode 100644 aws-sdk-go-v2/service/ecs/snapshot_test.go
 create mode 100644 aws-sdk-go-v2/service/ecs/types/enums.go
 create mode 100644 aws-sdk-go-v2/service/ecs/types/errors.go
 create mode 100644 aws-sdk-go-v2/service/ecs/types/types.go
 create mode 100644 aws-sdk-go-v2/service/ecs/validators.go

diff --git a/aws-sdk-go-v2/service/ecs/api_client.go b/aws-sdk-go-v2/service/ecs/api_client.go
new file mode 100644
index 00000000000..16e7e29d251
--- /dev/null
+++ b/aws-sdk-go-v2/service/ecs/api_client.go
@@ -0,0 +1,926 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+ + +package ecs + +import ( + "sync/atomic" + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + cryptorand "crypto/rand" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + "errors" + "fmt" + "net/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "net" + "github.com/aws/aws-sdk-go-v2/aws/retry" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithydocument "github.com/aws/smithy-go/document" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithyrand "github.com/aws/smithy-go/rand" + "time" + "github.com/aws/smithy-go/tracing" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" +) + +const ServiceID = "ECS" +const ServiceAPIVersion = "2014-11-13" + +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/ecs") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/ecs") +} + +// Client provides the API client to make operations call for Amazon EC2 Container +// Service. +type Client struct { + options Options + + // Difference between the time reported by the server and the client +timeOffset *atomic.Int64 +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. 
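Before the constructors that follow, a brief illustration of how the timing helpers defined earlier in this file fit together: timeOperationMetric wraps a closure and records its elapsed time, in seconds, against one of the histograms registered by withOperationMetrics, while startMetricTimer is the deferred variant of the same idea. The sketch below is illustrative only and is not part of the generated file; the helpers are unexported, so the pattern applies only inside this package, the function name and endpoint literal are placeholders, and the context must already have passed through withOperationMetrics or getOperationMetrics will panic on its type assertion.

func exampleTimedEndpointResolve(ctx context.Context) (string, error) {
	// Record the closure's duration against the histogram that histogramFor
	// returns for "client.call.resolve_endpoint_duration".
	return timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", func() (string, error) {
		// ... resolve the endpoint here; the literal below is only a placeholder ...
		return "https://ecs.us-west-2.amazonaws.com", nil
	})
}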
+func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + +setResolvedDefaultsMode(&options) + +resolveRetryer(&options) + +resolveHTTPClient(&options) + +resolveHTTPSignerV4(&options) + +resolveIdempotencyTokenProvider(&options) + +resolveEndpointResolverV2(&options) + +resolveTracerProvider(&options) + +resolveMeterProvider(&options) + + resolveAuthSchemeResolver(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttempts(&options) + +ignoreAnonymousAuth(&options) + +wrapWithAnonymousAuth(&options) + + resolveAuthSchemes(&options) + + client := &Client{ + options: options, + } + + initializeTimeOffsetResolver(client) + + return client +} + +// Options returns a copy of the client configuration. +// +// Callers SHOULD NOT perform mutations on any inner structures within client +// config. Config overrides should instead be made on a per-operation basis through +// functional options. +func (c *Client) Options() Options { + return c.options.Copy() +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + + for _, fn := range optFns { + fn(&options) + } + + finalizeOperationRetryMaxAttempts(&options, *c) + +finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + ctx, err = withOperationMetrics(ctx, options.MeterProvider) + if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func (o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ecs") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + + return result, metadata, err +} + +type operationInputKey 
struct{} + +func setOperationInput(ctx context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { + +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %w", err) +} +if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) +} +if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) +} +if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %w", err) +} + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), +}), + } + } +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} + func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) + } + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. 
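NewFromConfig, defined next, is the constructor most callers use, and the variadic functional options that it and every operation method accept are applied to the per-call copy of Options made in invokeOperation above, so they never mutate the client's stored configuration. The following caller-side sketch is illustrative only: the import path shown is the upstream aws-sdk-go-v2 module path and the copy added by this patch may be imported under a repository-local path instead, while ListClusters and ClusterArns come from the generated operation files listed in this change.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func main() {
	// Shared configuration supplies the region, credentials, and retry settings.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Client-level functional options are applied once, inside NewFromConfig/New.
	client := ecs.NewFromConfig(cfg, func(o *ecs.Options) {
		o.RetryMaxAttempts = 5
	})

	// Per-operation options act on the per-call copy of Options, so this
	// override applies to this request only.
	out, err := client.ListClusters(context.TODO(), &ecs.ListClustersInput{},
		func(o *ecs.Options) { o.RetryMaxAttempts = 2 })
	if err != nil {
		log.Fatal(err)
	}
	log.Println("cluster ARNs:", out.ClusterArns)
}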
+func NewFromConfig(cfg aws.Config, optFns ... func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) +} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) 
+ } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ecs", goModuleVersion) + if len(options.AppID) > 0 { + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) + } + + return nil +} + +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize ( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, 
middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) +ctx, span := tracer.StartSpan(ctx, "RetryLoop") +defer span.End() + +return next.HandleFinalize(ctx, in) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func resolveIdempotencyTokenProvider(o *Options) { + if o.IdempotencyTokenProvider != nil { + return + } + o.IdempotencyTokenProvider = smithyrand.NewUUIDIdempotencyToken(cryptorand.Reader) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ecs") + }) + if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err + } + return nil +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { return err } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { return err } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", 
middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + +// IdempotencyTokenProvider interface for providing idempotency token +type IdempotencyTokenProvider interface { + GetIdempotencyToken() (string, error) +} + +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) + +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type disableHTTPSMiddleware struct { + DisableHTTPS bool +} + +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" +} + +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleFinalize(ctx, in) +} + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) +} + +type spanInitializeStart struct { + +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize ( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + +return next.HandleInitialize(ctx, in) +} +type spanInitializeEnd struct { + +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize ( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) +span.End() + +return 
next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { + +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize ( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + +return next.HandleSerialize(ctx, in) +} +type spanBuildRequestEnd struct { + +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild ( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) +span.End() + +return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/aws-sdk-go-v2/service/ecs/api_client_test.go b/aws-sdk-go-v2/service/ecs/api_client_test.go new file mode 100644 index 00000000000..b5266ec7d3f --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_client_test.go @@ -0,0 +1,126 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "context" + "net/http" + "io/ioutil" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "strings" + "testing" +) + +func TestClient_resolveRetryOptions(t *testing.T) { + nopClient := smithyhttp.ClientDoFunc(func(_ *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(strings.NewReader("")), + }, nil + }) + + cases := map[string]struct{ + defaultsMode aws.DefaultsMode + retryer aws.Retryer + retryMaxAttempts int + opRetryMaxAttempts *int + retryMode aws.RetryMode + expectClientRetryMode aws.RetryMode + expectClientMaxAttempts int + expectOpMaxAttempts int + }{ + "defaults": { + defaultsMode: aws.DefaultsModeStandard, + expectClientRetryMode: aws.RetryModeStandard, + expectClientMaxAttempts: 3, + expectOpMaxAttempts: 3, + }, + "custom default retry": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + "custom op max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(2), + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 2, + }, + "custom op no change max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(10), + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + "custom op 0 max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(0), + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + } + + 
for name, c := range cases { + t.Run(name, func(t *testing.T) { + client := NewFromConfig(aws.Config{ + DefaultsMode: c.defaultsMode, + Retryer: func() func() aws.Retryer { + if c.retryer == nil { return nil } + + return func() aws.Retryer { return c.retryer } + }(), + HTTPClient: nopClient, + RetryMaxAttempts: c.retryMaxAttempts, + RetryMode: c.retryMode, + }, func (o *Options) { + if o.Retryer == nil { + t.Errorf("retryer must not be nil in functional options") + } + }) + + if e, a := c.expectClientRetryMode, client.options.RetryMode; e != a { + t.Errorf("expect %v retry mode, got %v", e, a) + } + if e, a := c.expectClientMaxAttempts, client.options.Retryer.MaxAttempts(); e != a { + t.Errorf("expect %v max attempts, got %v", e, a) + } + + _, _, err := client.invokeOperation(context.Background(), "mockOperation", struct{}{}, + []func(*Options){ + func(o *Options) { + if c.opRetryMaxAttempts == nil { + return + } + o.RetryMaxAttempts = *c.opRetryMaxAttempts + }, + }, + func(s *middleware.Stack, o Options) error { + s.Initialize.Clear() + s.Serialize.Clear() + s.Build.Clear() + s.Finalize.Clear() + s.Deserialize.Clear() + + if e, a := c.expectOpMaxAttempts, o.Retryer.MaxAttempts(); e != a { + t.Errorf("expect %v op max attempts, got %v", e, a) + } + return nil + }) + if err != nil { + t.Fatalf("expect no operation error, got %v", err) + } + }) + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_CreateCapacityProvider.go b/aws-sdk-go-v2/service/ecs/api_op_CreateCapacityProvider.go new file mode 100644 index 00000000000..4883bee278e --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_CreateCapacityProvider.go @@ -0,0 +1,193 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Creates a new capacity provider. Capacity providers are associated with an +// Amazon ECS cluster and are used in capacity provider strategies to facilitate +// cluster auto scaling. +// +// Only capacity providers that use an Auto Scaling group can be created. Amazon +// ECS tasks on Fargate use the FARGATE and FARGATE_SPOT capacity providers. These +// providers are available to all accounts in the Amazon Web Services Regions that +// Fargate supports. +func (c *Client) CreateCapacityProvider(ctx context.Context, params *CreateCapacityProviderInput, optFns ...func(*Options)) (*CreateCapacityProviderOutput, error) { + if params == nil { params = &CreateCapacityProviderInput{} } + + result, metadata, err := c.invokeOperation(ctx, "CreateCapacityProvider", params, optFns, c.addOperationCreateCapacityProviderMiddlewares) + if err != nil { return nil, err } + + out := result.(*CreateCapacityProviderOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateCapacityProviderInput struct { + + // The details of the Auto Scaling group for the capacity provider. + // + // This member is required. + AutoScalingGroupProvider *types.AutoScalingGroupProvider + + // The name of the capacity provider. Up to 255 characters are allowed. They + // include letters (both upper and lowercase letters), numbers, underscores (_), + // and hyphens (-). The name can't be prefixed with " aws ", " ecs ", or " fargate + // ". + // + // This member is required. 
+ Name *string + + // The metadata that you apply to the capacity provider to categorize and organize + // them more conveniently. Each tag consists of a key and an optional value. You + // define both of them. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateCapacityProviderOutput struct { + + // The full description of the new capacity provider. + CapacityProvider *types.CapacityProvider + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateCapacityProviderMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateCapacityProvider{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateCapacityProvider{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateCapacityProvider"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpCreateCapacityProviderValidationMiddleware(stack); err != 
nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateCapacityProvider(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateCapacityProvider(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateCapacityProvider", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_CreateCluster.go b/aws-sdk-go-v2/service/ecs/api_op_CreateCluster.go new file mode 100644 index 00000000000..7371b395ebd --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_CreateCluster.go @@ -0,0 +1,253 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Creates a new Amazon ECS cluster. By default, your account receives a default +// cluster when you launch your first container instance. However, you can create +// your own cluster with a unique name. +// +// When you call the [CreateCluster] API operation, Amazon ECS attempts to create the Amazon ECS +// service-linked role for your account. This is so that it can manage required +// resources in other Amazon Web Services services on your behalf. However, if the +// user that makes the call doesn't have permissions to create the service-linked +// role, it isn't created. For more information, see [Using service-linked roles for Amazon ECS]in the Amazon Elastic +// Container Service Developer Guide. +// +// [Using service-linked roles for Amazon ECS]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html +// [CreateCluster]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCluster.html +func (c *Client) CreateCluster(ctx context.Context, params *CreateClusterInput, optFns ...func(*Options)) (*CreateClusterOutput, error) { + if params == nil { params = &CreateClusterInput{} } + + result, metadata, err := c.invokeOperation(ctx, "CreateCluster", params, optFns, c.addOperationCreateClusterMiddlewares) + if err != nil { return nil, err } + + out := result.(*CreateClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateClusterInput struct { + + // The short name of one or more capacity providers to associate with the cluster. + // A capacity provider must be associated with a cluster before it can be included + // as part of the default capacity provider strategy of the cluster or used in a + // capacity provider strategy when calling the [CreateService]or [RunTask] actions. 
+ // + // If specifying a capacity provider that uses an Auto Scaling group, the capacity + // provider must be created but not associated with another cluster. New Auto + // Scaling group capacity providers can be created with the [CreateCapacityProvider]API operation. + // + // To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT + // capacity providers. The Fargate capacity providers are available to all accounts + // and only need to be associated with a cluster to be used. + // + // The [PutClusterCapacityProviders] API operation is used to update the list of available capacity providers + // for a cluster after the cluster is created. + // + // [CreateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html + // [PutClusterCapacityProviders]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutClusterCapacityProviders.html + // [CreateCapacityProvider]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCapacityProvider.html + // [RunTask]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html + CapacityProviders []string + + // The name of your cluster. If you don't specify a name for your cluster, you + // create a cluster that's named default . Up to 255 letters (uppercase and + // lowercase), numbers, underscores, and hyphens are allowed. + ClusterName *string + + // The execute command configuration for the cluster. + Configuration *types.ClusterConfiguration + + // The capacity provider strategy to set as the default for the cluster. After a + // default capacity provider strategy is set for a cluster, when you call the [CreateService]or [RunTask] + // APIs with no capacity provider strategy or launch type specified, the default + // capacity provider strategy for the cluster is used. + // + // If a default capacity provider strategy isn't defined for a cluster when it was + // created, it can be defined later with the [PutClusterCapacityProviders]API operation. + // + // [CreateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html + // [PutClusterCapacityProviders]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutClusterCapacityProviders.html + // [RunTask]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html + DefaultCapacityProviderStrategy []types.CapacityProviderStrategyItem + + // Use this parameter to set a default Service Connect namespace. After you set a + // default Service Connect namespace, any new services with Service Connect turned + // on that are created in the cluster are added as client services in the + // namespace. This setting only applies to new services that set the enabled + // parameter to true in the ServiceConnectConfiguration . You can set the namespace + // of each service individually in the ServiceConnectConfiguration to override + // this default parameter. + // + // Tasks that run in a namespace can use short names to connect to services in the + // namespace. Tasks can connect to services across all of the clusters in the + // namespace. Tasks connect through a managed proxy container that collects logs + // and metrics for increased visibility. Only the tasks that Amazon ECS services + // create are supported with Service Connect. For more information, see [Service Connect]in the + // Amazon Elastic Container Service Developer Guide.
+ // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + ServiceConnectDefaults *types.ClusterServiceConnectDefaultsRequest + + // The setting to use when creating a cluster. This parameter is used to turn on + // CloudWatch Container Insights for a cluster. If this value is specified, it + // overrides the containerInsights value set with [PutAccountSetting] or [PutAccountSettingDefault]. + // + // [PutAccountSettingDefault]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html + // [PutAccountSetting]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html + Settings []types.ClusterSetting + + // The metadata that you apply to the cluster to help you categorize and organize + // them. Each tag consists of a key and an optional value. You define both. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateClusterOutput struct { + + // The full description of your new cluster. + Cluster *types.Cluster + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateCluster{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateCluster{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateCluster"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpCreateClusterValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateCluster(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateCluster", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_CreateService.go b/aws-sdk-go-v2/service/ecs/api_op_CreateService.go new file mode 100644 index 00000000000..209ab20db1d --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_CreateService.go @@ -0,0 +1,562 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Runs and maintains your desired number of tasks from a specified task +// definition. If the number of tasks running in a service drops below the +// desiredCount , Amazon ECS runs another copy of the task in the specified +// cluster. To update an existing service, use [UpdateService]. +// +// On March 21, 2024, a change was made to resolve the task definition revision +// before authorization. When a task definition revision is not specified, +// authorization will occur using the latest revision of a task definition. +// +// Amazon Elastic Inference (EI) is no longer available to customers. +// +// In addition to maintaining the desired count of tasks in your service, you can +// optionally run your service behind one or more load balancers. The load +// balancers distribute traffic across the tasks that are associated with the +// service. For more information, see [Service load balancing]in the Amazon Elastic Container Service +// Developer Guide. +// +// You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume +// when creating or updating a service. volumeConfigurations is only supported for +// REPLICA service and not DAEMON service. For more information, see [Amazon EBS volumes]in the Amazon +// Elastic Container Service Developer Guide. +// +// Tasks for services that don't use a load balancer are considered healthy if +// they're in the RUNNING state. Tasks for services that use a load balancer are +// considered healthy if they're in the RUNNING state and are reported as healthy +// by the load balancer. +// +// There are two service scheduler strategies available: +// +// - REPLICA - The replica scheduling strategy places and maintains your desired +// number of tasks across your cluster. By default, the service scheduler spreads +// tasks across Availability Zones. You can use task placement strategies and +// constraints to customize task placement decisions. For more information, see [Service scheduler concepts] +// in the Amazon Elastic Container Service Developer Guide. +// +// - DAEMON - The daemon scheduling strategy deploys exactly one task on each +// active container instance that meets all of the task placement constraints that +// you specify in your cluster. The service scheduler also evaluates the task +// placement constraints for running tasks. It also stops tasks that don't meet the +// placement constraints. When using this strategy, you don't need to specify a +// desired number of tasks, a task placement strategy, or use Service Auto Scaling +// policies. For more information, see [Service scheduler concepts]in the Amazon Elastic Container Service +// Developer Guide. +// +// You can optionally specify a deployment configuration for your service. The +// deployment is initiated by changing properties. For example, the deployment +// might be initiated by the task definition or by your desired count of a service. +// You can use [UpdateService]. The default value for a replica service for minimumHealthyPercent +// is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.
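As a concrete illustration of the deployment parameters described above (a sketch, not generated code; it assumes the MinimumHealthyPercent and MaximumPercent fields of types.DeploymentConfiguration, where types is github.com/aws/aws-sdk-go-v2/service/ecs/types, reuses the imports and client from the earlier sketch, and uses arbitrary placeholder values):

// Keep at least half of the desired tasks healthy and allow up to 150%
// of the desired count to run while a deployment is in progress.
input := &ecs.CreateServiceInput{
	ServiceName:    aws.String("web"),   // placeholder
	TaskDefinition: aws.String("web:1"), // placeholder
	DesiredCount:   aws.Int32(4),
	DeploymentConfiguration: &types.DeploymentConfiguration{
		MinimumHealthyPercent: aws.Int32(50),
		MaximumPercent:        aws.Int32(150),
	},
}
_, err = client.CreateService(ctx, input)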
+// +// If a service uses the ECS deployment controller, the minimum healthy percent +// represents a lower limit on the number of tasks in a service that must remain in +// the RUNNING state during a deployment. Specifically, it represents it as a +// percentage of your desired number of tasks (rounded up to the nearest integer). +// This happens when any of your container instances are in the DRAINING state if +// the service contains tasks using the EC2 launch type. Using this parameter, you +// can deploy without using additional cluster capacity. For example, if you set +// your service to have desired number of four tasks and a minimum healthy percent +// of 50%, the scheduler might stop two existing tasks to free up cluster capacity +// before starting two new tasks. If they're in the RUNNING state, tasks for +// services that don't use a load balancer are considered healthy . If they're in +// the RUNNING state and reported as healthy by the load balancer, tasks for +// services that do use a load balancer are considered healthy . The default value +// for minimum healthy percent is 100%. +// +// If a service uses the ECS deployment controller, the maximum percent parameter +// represents an upper limit on the number of tasks in a service that are allowed +// in the RUNNING or PENDING state during a deployment. Specifically, it +// represents it as a percentage of the desired number of tasks (rounded down to +// the nearest integer). This happens when any of your container instances are in +// the DRAINING state if the service contains tasks using the EC2 launch type. +// Using this parameter, you can define the deployment batch size. For example, if +// your service has a desired number of four tasks and a maximum percent value of +// 200%, the scheduler may start four new tasks before stopping the four older +// tasks (provided that the cluster resources required to do this are available). +// The default value for maximum percent is 200%. +// +// If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller +// types and tasks that use the EC2 launch type, the minimum healthy percent and +// maximum percent values are used only to define the lower and upper limit on the +// number of the tasks in the service that remain in the RUNNING state. This is +// while the container instances are in the DRAINING state. If the tasks in the +// service use the Fargate launch type, the minimum healthy percent and maximum +// percent values aren't used. This is the case even if they're currently visible +// when describing your service. +// +// When creating a service that uses the EXTERNAL deployment controller, you can +// specify only parameters that aren't controlled at the task set level. The only +// required parameter is the service name. You control your services using the [CreateTaskSet]. +// For more information, see [Amazon ECS deployment types]in the Amazon Elastic Container Service Developer +// Guide. +// +// When the service scheduler launches new tasks, it determines task placement. 
+// For information about task placement and task placement strategies, see [Amazon ECS task placement]in the +// Amazon Elastic Container Service Developer Guide +// +// [Amazon ECS task placement]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement.html +// [Service scheduler concepts]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html +// [Amazon ECS deployment types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html +// [UpdateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateService.html +// [CreateTaskSet]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateTaskSet.html +// [Service load balancing]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html +// [Amazon EBS volumes]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ebs-volumes.html#ebs-volume-types +func (c *Client) CreateService(ctx context.Context, params *CreateServiceInput, optFns ...func(*Options)) (*CreateServiceOutput, error) { + if params == nil { params = &CreateServiceInput{} } + + result, metadata, err := c.invokeOperation(ctx, "CreateService", params, optFns, c.addOperationCreateServiceMiddlewares) + if err != nil { return nil, err } + + out := result.(*CreateServiceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateServiceInput struct { + + // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, + // underscores, and hyphens are allowed. Service names must be unique within a + // cluster, but you can have similarly named services in multiple clusters within a + // Region or across multiple Regions. + // + // This member is required. + ServiceName *string + + // The capacity provider strategy to use for the service. + // + // If a capacityProviderStrategy is specified, the launchType parameter must be + // omitted. If no capacityProviderStrategy or launchType is specified, the + // defaultCapacityProviderStrategy for the cluster is used. + // + // A capacity provider strategy may contain a maximum of 6 capacity providers. + CapacityProviderStrategy []types.CapacityProviderStrategyItem + + // An identifier that you provide to ensure the idempotency of the request. It + // must be unique and is case sensitive. Up to 36 ASCII characters in the range of + // 33-126 (inclusive) are allowed. + ClientToken *string + + // The short name or full Amazon Resource Name (ARN) of the cluster that you run + // your service on. If you do not specify a cluster, the default cluster is + // assumed. + Cluster *string + + // Optional deployment parameters that control how many tasks run during the + // deployment and the ordering of stopping and starting tasks. + DeploymentConfiguration *types.DeploymentConfiguration + + // The deployment controller to use for the service. If no deployment controller + // is specified, the default value of ECS is used. + DeploymentController *types.DeploymentController + + // The number of instantiations of the specified task definition to place and keep + // running in your service. + // + // This is required if schedulingStrategy is REPLICA or isn't specified. If + // schedulingStrategy is DAEMON then this isn't required. + DesiredCount *int32 + + // Specifies whether to turn on Amazon ECS managed tags for the tasks within the + // service. For more information, see [Tagging your Amazon ECS resources]in the Amazon Elastic Container Service + // Developer Guide. 
+ // + // When you use Amazon ECS managed tags, you need to set the propagateTags request + // parameter. + // + // [Tagging your Amazon ECS resources]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html + EnableECSManagedTags bool + + // Determines whether the execute command functionality is turned on for the + // service. If true , this enables execute command functionality on all containers + // in the service tasks. + EnableExecuteCommand bool + + // The period of time, in seconds, that the Amazon ECS service scheduler ignores + // unhealthy Elastic Load Balancing target health checks after a task has first + // started. This is only used when your service is configured to use a load + // balancer. If your service has a load balancer defined and you don't specify a + // health check grace period value, the default value of 0 is used. + // + // If you do not use an Elastic Load Balancing, we recommend that you use the + // startPeriod in the task definition health check parameters. For more + // information, see [Health check]. + // + // If your service's tasks take a while to start and respond to Elastic Load + // Balancing health checks, you can specify a health check grace period of up to + // 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS service + // scheduler ignores health check status. This grace period can prevent the service + // scheduler from marking tasks as unhealthy and stopping them before they have + // time to come up. + // + // [Health check]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HealthCheck.html + HealthCheckGracePeriodSeconds *int32 + + // The infrastructure that you run your service on. For more information, see [Amazon ECS launch types] in + // the Amazon Elastic Container Service Developer Guide. + // + // The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure. + // + // Fargate Spot infrastructure is available for use but a capacity provider + // strategy must be used. For more information, see [Fargate capacity providers]in the Amazon ECS Developer + // Guide. + // + // The EC2 launch type runs your tasks on Amazon EC2 instances registered to your + // cluster. + // + // The EXTERNAL launch type runs your tasks on your on-premises server or virtual + // machine (VM) capacity registered to your cluster. + // + // A service can use either a launch type or a capacity provider strategy. If a + // launchType is specified, the capacityProviderStrategy parameter must be omitted. + // + // [Amazon ECS launch types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + // [Fargate capacity providers]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/fargate-capacity-providers.html + LaunchType types.LaunchType + + // A load balancer object representing the load balancers to use with your + // service. For more information, see [Service load balancing]in the Amazon Elastic Container Service + // Developer Guide. + // + // If the service uses the rolling update ( ECS ) deployment controller and using + // either an Application Load Balancer or Network Load Balancer, you must specify + // one or more target group ARNs to attach to the service. The service-linked role + // is required for services that use multiple target groups. For more information, + // see [Using service-linked roles for Amazon ECS]in the Amazon Elastic Container Service Developer Guide. 
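A sketch of the target-group wiring described above for the rolling update (ECS) controller (not generated code; the target group ARN, container name, and port are placeholders, and the slice would be assigned to the LoadBalancers field of CreateServiceInput):

// One types.LoadBalancer entry per load-balanced container name/port pair.
loadBalancers := []types.LoadBalancer{{
	TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/web/0123456789abcdef"), // placeholder
	ContainerName:  aws.String("web"), // must match the name in the container definition
	ContainerPort:  aws.Int32(8080),
}}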
+ // + // If the service uses the CODE_DEPLOY deployment controller, the service is + // required to use either an Application Load Balancer or Network Load Balancer. + // When creating an CodeDeploy deployment group, you specify two target groups + // (referred to as a targetGroupPair ). During a deployment, CodeDeploy determines + // which task set in your service has the status PRIMARY , and it associates one + // target group with it. Then, it also associates the other target group with the + // replacement task set. The load balancer can also have up to two listeners: a + // required listener for production traffic and an optional listener that you can + // use to perform validation tests with Lambda functions before routing production + // traffic to it. + // + // If you use the CODE_DEPLOY deployment controller, these values can be changed + // when updating the service. + // + // For Application Load Balancers and Network Load Balancers, this object must + // contain the load balancer target group ARN, the container name, and the + // container port to access from the load balancer. The container name must be as + // it appears in a container definition. The load balancer name parameter must be + // omitted. When a task from this service is placed on a container instance, the + // container instance and port combination is registered as a target in the target + // group that's specified here. + // + // For Classic Load Balancers, this object must contain the load balancer name, + // the container name , and the container port to access from the load balancer. + // The container name must be as it appears in a container definition. The target + // group ARN parameter must be omitted. When a task from this service is placed on + // a container instance, the container instance is registered with the load + // balancer that's specified here. + // + // Services with tasks that use the awsvpc network mode (for example, those with + // the Fargate launch type) only support Application Load Balancers and Network + // Load Balancers. Classic Load Balancers aren't supported. Also, when you create + // any target groups for these services, you must choose ip as the target type, + // not instance . This is because tasks that use the awsvpc network mode are + // associated with an elastic network interface, not an Amazon EC2 instance. + // + // [Service load balancing]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html + // [Using service-linked roles for Amazon ECS]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html + LoadBalancers []types.LoadBalancer + + // The network configuration for the service. This parameter is required for task + // definitions that use the awsvpc network mode to receive their own elastic + // network interface, and it isn't supported for other network modes. For more + // information, see [Task networking]in the Amazon Elastic Container Service Developer Guide. + // + // [Task networking]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html + NetworkConfiguration *types.NetworkConfiguration + + // An array of placement constraint objects to use for tasks in your service. You + // can specify a maximum of 10 constraints for each task. This limit includes + // constraints in the task definition and those specified at runtime. + PlacementConstraints []types.PlacementConstraint + + // The placement strategy objects to use for tasks in your service. 
You can + // specify a maximum of 5 strategy rules for each service. + PlacementStrategy []types.PlacementStrategy + + // The platform version that your tasks in the service are running on. A platform + // version is specified only for tasks using the Fargate launch type. If one isn't + // specified, the LATEST platform version is used. For more information, see [Fargate platform versions] in + // the Amazon Elastic Container Service Developer Guide. + // + // [Fargate platform versions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html + PlatformVersion *string + + // Specifies whether to propagate the tags from the task definition to the task. + // If no value is specified, the tags aren't propagated. Tags can only be + // propagated to the task during task creation. To add tags to a task after task + // creation, use the [TagResource]API action. + // + // You must set this to a value other than NONE when you use Cost Explorer. For + // more information, see [Amazon ECS usage reports]in the Amazon Elastic Container Service Developer Guide. + // + // The default is NONE . + // + // [TagResource]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html + // [Amazon ECS usage reports]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/usage-reports.html + PropagateTags types.PropagateTags + + // The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon + // ECS to make calls to your load balancer on your behalf. This parameter is only + // permitted if you are using a load balancer with your service and your task + // definition doesn't use the awsvpc network mode. If you specify the role + // parameter, you must also specify a load balancer object with the loadBalancers + // parameter. + // + // If your account has already created the Amazon ECS service-linked role, that + // role is used for your service unless you specify a role here. The service-linked + // role is required if your task definition uses the awsvpc network mode or if the + // service is configured to use service discovery, an external deployment + // controller, multiple target groups, or Elastic Inference accelerators in which + // case you don't specify a role here. For more information, see [Using service-linked roles for Amazon ECS]in the Amazon + // Elastic Container Service Developer Guide. + // + // If your specified role has a path other than / , then you must either specify + // the full role ARN (this is recommended) or prefix the role name with the path. + // For example, if a role with the name bar has a path of /foo/ then you would + // specify /foo/bar as the role name. For more information, see [Friendly names and paths] in the IAM User + // Guide. + // + // [Friendly names and paths]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names + // [Using service-linked roles for Amazon ECS]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html + Role *string + + // The scheduling strategy to use for the service. For more information, see [Services]. + // + // There are two service scheduler strategies available: + // + // - REPLICA -The replica scheduling strategy places and maintains the desired + // number of tasks across your cluster. By default, the service scheduler spreads + // tasks across Availability Zones. You can use task placement strategies and + // constraints to customize task placement decisions. 
This scheduler strategy is + // required if the service uses the CODE_DEPLOY or EXTERNAL deployment controller + // types. + // + // - DAEMON -The daemon scheduling strategy deploys exactly one task on each + // active container instance that meets all of the task placement constraints that + // you specify in your cluster. The service scheduler also evaluates the task + // placement constraints for running tasks and will stop tasks that don't meet the + // placement constraints. When you're using this strategy, you don't need to + // specify a desired number of tasks, a task placement strategy, or use Service + // Auto Scaling policies. + // + // Tasks using the Fargate launch type or the CODE_DEPLOY or EXTERNAL deployment + // controller types don't support the DAEMON scheduling strategy. + // + // [Services]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html + SchedulingStrategy types.SchedulingStrategy + + // The configuration for this service to discover and connect to services, and be + // discovered by, and connected from, other services within a namespace. + // + // Tasks that run in a namespace can use short names to connect to services in the + // namespace. Tasks can connect to services across all of the clusters in the + // namespace. Tasks connect through a managed proxy container that collects logs + // and metrics for increased visibility. Only the tasks that Amazon ECS services + // create are supported with Service Connect. For more information, see [Service Connect]in the + // Amazon Elastic Container Service Developer Guide. + // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + ServiceConnectConfiguration *types.ServiceConnectConfiguration + + // The details of the service discovery registry to associate with this service. + // For more information, see [Service discovery]. + // + // Each service may be associated with one service registry. Multiple service + // registries for each service isn't supported. + // + // [Service discovery]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html + ServiceRegistries []types.ServiceRegistry + + // The metadata that you apply to the service to help you categorize and organize + // them. Each tag consists of a key and an optional value, both of which you + // define. When a service is deleted, the tags are deleted as well. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. 
+ Tags []types.Tag + + // The family and revision ( family:revision ) or full ARN of the task definition + // to run in your service. If a revision isn't specified, the latest ACTIVE + // revision is used. + // + // A task definition must be specified if the service uses either the ECS or + // CODE_DEPLOY deployment controllers. + // + // For more information about deployment types, see [Amazon ECS deployment types]. + // + // [Amazon ECS deployment types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html + TaskDefinition *string + + // The configuration for a volume specified in the task definition as a volume + // that is configured at launch time. Currently, the only supported volume type is + // an Amazon EBS volume. + VolumeConfigurations []types.ServiceVolumeConfiguration + + noSmithyDocumentSerde +} + +type CreateServiceOutput struct { + + // The full description of your service following the create call. + // + // A service will return either a capacityProviderStrategy or launchType + // parameter, but not both, depending where one was specified when it was created. + // + // If a service is using the ECS deployment controller, the deploymentController + // and taskSets parameters will not be returned. + // + // if the service uses the CODE_DEPLOY deployment controller, the + // deploymentController , taskSets and deployments parameters will be returned, + // however the deployments parameter will be an empty list. + Service *types.Service + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateServiceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateService{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateService{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateService"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addOpCreateServiceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateService(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateService(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateService", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_CreateTaskSet.go b/aws-sdk-go-v2/service/ecs/api_op_CreateTaskSet.go new file mode 100644 index 00000000000..42aae2cf949 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_CreateTaskSet.go @@ -0,0 +1,276 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Create a task set in the specified cluster and service. This is used when a +// service uses the EXTERNAL deployment controller type. For more information, see [Amazon ECS deployment types] +// in the Amazon Elastic Container Service Developer Guide. +// +// On March 21, 2024, a change was made to resolve the task definition revision +// before authorization. When a task definition revision is not specified, +// authorization will occur using the latest revision of a task definition. +// +// For information about the maximum number of task sets and other quotas, see [Amazon ECS service quotas] in +// the Amazon Elastic Container Service Developer Guide. +// +// [Amazon ECS deployment types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html +// [Amazon ECS service quotas]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-quotas.html +func (c *Client) CreateTaskSet(ctx context.Context, params *CreateTaskSetInput, optFns ...func(*Options)) (*CreateTaskSetOutput, error) { + if params == nil { params = &CreateTaskSetInput{} } + + result, metadata, err := c.invokeOperation(ctx, "CreateTaskSet", params, optFns, c.addOperationCreateTaskSetMiddlewares) + if err != nil { return nil, err } + + out := result.(*CreateTaskSetOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateTaskSetInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // service to create the task set in. + // + // This member is required. + Cluster *string + + // The short name or full Amazon Resource Name (ARN) of the service to create the + // task set in. + // + // This member is required. + Service *string + + // The task definition for the tasks in the task set to use. 
If a revision isn't + // specified, the latest ACTIVE revision is used. + // + // This member is required. + TaskDefinition *string + + // The capacity provider strategy to use for the task set. + // + // A capacity provider strategy consists of one or more capacity providers along + // with the base and weight to assign to them. A capacity provider must be + // associated with the cluster to be used in a capacity provider strategy. The [PutClusterCapacityProviders]API + // is used to associate a capacity provider with a cluster. Only capacity providers + // with an ACTIVE or UPDATING status can be used. + // + // If a capacityProviderStrategy is specified, the launchType parameter must be + // omitted. If no capacityProviderStrategy or launchType is specified, the + // defaultCapacityProviderStrategy for the cluster is used. + // + // If specifying a capacity provider that uses an Auto Scaling group, the capacity + // provider must already be created. New capacity providers can be created with the + // [CreateCapacityProviderProvider]API operation. + // + // To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT + // capacity providers. The Fargate capacity providers are available to all accounts + // and only need to be associated with a cluster to be used. + // + // The [PutClusterCapacityProviders] API operation is used to update the list of available capacity providers + // for a cluster after the cluster is created. + // + // [PutClusterCapacityProviders]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutClusterCapacityProviders.html + // [CreateCapacityProviderProvider]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCapacityProviderProvider.html + CapacityProviderStrategy []types.CapacityProviderStrategyItem + + // An identifier that you provide to ensure the idempotency of the request. It + // must be unique and is case sensitive. Up to 36 ASCII characters in the range of + // 33-126 (inclusive) are allowed. + ClientToken *string + + // An optional non-unique tag that identifies this task set in external systems. + // If the task set is associated with a service discovery registry, the tasks in + // this task set will have the ECS_TASK_SET_EXTERNAL_ID Cloud Map attribute set to + // the provided value. + ExternalId *string + + // The launch type that new tasks in the task set uses. For more information, see [Amazon ECS launch types] + // in the Amazon Elastic Container Service Developer Guide. + // + // If a launchType is specified, the capacityProviderStrategy parameter must be + // omitted. + // + // [Amazon ECS launch types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + LaunchType types.LaunchType + + // A load balancer object representing the load balancer to use with the task set. + // The supported load balancer types are either an Application Load Balancer or a + // Network Load Balancer. + LoadBalancers []types.LoadBalancer + + // An object representing the network configuration for a task set. + NetworkConfiguration *types.NetworkConfiguration + + // The platform version that the tasks in the task set uses. A platform version is + // specified only for tasks using the Fargate launch type. If one isn't specified, + // the LATEST platform version is used. + PlatformVersion *string + + // A floating-point percentage of the desired number of tasks to place and keep + // running in the task set. 
+ Scale *types.Scale + + // The details of the service discovery registries to assign to this task set. For + // more information, see [Service discovery]. + // + // [Service discovery]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html + ServiceRegistries []types.ServiceRegistry + + // The metadata that you apply to the task set to help you categorize and organize + // them. Each tag consists of a key and an optional value. You define both. When a + // service is deleted, the tags are deleted. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateTaskSetOutput struct { + + // Information about a set of Amazon ECS tasks in either an CodeDeploy or an + // EXTERNAL deployment. A task set includes details such as the desired number of + // tasks, how many tasks are running, and whether the task set serves production + // traffic. + TaskSet *types.TaskSet + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateTaskSetMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateTaskSet{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateTaskSet{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTaskSet"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpCreateTaskSetValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTaskSet(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateTaskSet(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateTaskSet", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DeleteAccountSetting.go b/aws-sdk-go-v2/service/ecs/api_op_DeleteAccountSetting.go new file mode 100644 index 00000000000..eb5a3afd728 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DeleteAccountSetting.go @@ -0,0 +1,164 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
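The file that follows defines the DeleteAccountSetting operation. A caller-side sketch (not generated code; it assumes the client setup from the earlier CreateService sketch, and the enum constant name is assumed to follow the SDK's types.SettingName naming for the serviceLongArnFormat value):

// Disable the serviceLongArnFormat setting for the authenticated user
// (PrincipalArn is omitted, so only the caller's setting changes).
func disableServiceLongArnFormat(ctx context.Context, client *ecs.Client) error {
	_, err := client.DeleteAccountSetting(ctx, &ecs.DeleteAccountSettingInput{
		Name: types.SettingNameServiceLongArnFormat,
	})
	return err
}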
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Disables an account setting for a specified user, role, or the root user for an +// account. +func (c *Client) DeleteAccountSetting(ctx context.Context, params *DeleteAccountSettingInput, optFns ...func(*Options)) (*DeleteAccountSettingOutput, error) { + if params == nil { params = &DeleteAccountSettingInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DeleteAccountSetting", params, optFns, c.addOperationDeleteAccountSettingMiddlewares) + if err != nil { return nil, err } + + out := result.(*DeleteAccountSettingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteAccountSettingInput struct { + + // The resource name to disable the account setting for. If serviceLongArnFormat + // is specified, the ARN for your Amazon ECS services is affected. If + // taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS + // tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and + // resource ID for your Amazon ECS container instances is affected. If + // awsvpcTrunking is specified, the ENI limit for your Amazon ECS container + // instances is affected. + // + // This member is required. + Name types.SettingName + + // The Amazon Resource Name (ARN) of the principal. It can be a user, role, or + // the root user. If you specify the root user, it disables the account setting for + // all users, roles, and the root user of the account unless a user or role + // explicitly overrides these settings. If this field is omitted, the setting is + // changed only for the authenticated user. + PrincipalArn *string + + noSmithyDocumentSerde +} + +type DeleteAccountSettingOutput struct { + + // The account setting for the specified principal ARN. + Setting *types.Setting + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteAccountSettingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteAccountSetting{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteAccountSetting{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteAccountSetting"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeleteAccountSettingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteAccountSetting(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteAccountSetting(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteAccountSetting", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DeleteAttributes.go b/aws-sdk-go-v2/service/ecs/api_op_DeleteAttributes.go new file mode 100644 index 00000000000..227ab9eb746 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DeleteAttributes.go @@ -0,0 +1,158 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
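The file that follows defines the DeleteAttributes operation. A caller-side sketch (not generated code; it assumes the client setup from the earlier sketch, and the attribute name and cluster are placeholders):

// Per the input documentation, only the attribute name and target ID are
// supplied when deleting a custom attribute, not its value.
func deleteStackAttribute(ctx context.Context, client *ecs.Client, containerInstanceArn string) error {
	_, err := client.DeleteAttributes(ctx, &ecs.DeleteAttributesInput{
		Cluster: aws.String("default"), // placeholder
		Attributes: []types.Attribute{
			{Name: aws.String("stack"), TargetId: aws.String(containerInstanceArn)},
		},
	})
	return err
}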
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Deletes one or more custom attributes from an Amazon ECS resource. +func (c *Client) DeleteAttributes(ctx context.Context, params *DeleteAttributesInput, optFns ...func(*Options)) (*DeleteAttributesOutput, error) { + if params == nil { params = &DeleteAttributesInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DeleteAttributes", params, optFns, c.addOperationDeleteAttributesMiddlewares) + if err != nil { return nil, err } + + out := result.(*DeleteAttributesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteAttributesInput struct { + + // The attributes to delete from your resource. You can specify up to 10 + // attributes for each request. For custom attributes, specify the attribute name + // and target ID, but don't specify the value. If you specify the target ID using + // the short form, you must also specify the target type. + // + // This member is required. + Attributes []types.Attribute + + // The short name or full Amazon Resource Name (ARN) of the cluster that contains + // the resource to delete attributes. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string + + noSmithyDocumentSerde +} + +type DeleteAttributesOutput struct { + + // A list of attribute objects that were successfully deleted from your resource. + Attributes []types.Attribute + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteAttributes{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteAttributes{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + 
if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeleteAttributesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteAttributes(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteAttributes(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteAttributes", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DeleteCapacityProvider.go b/aws-sdk-go-v2/service/ecs/api_op_DeleteCapacityProvider.go new file mode 100644 index 00000000000..eae2f38759e --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DeleteCapacityProvider.go @@ -0,0 +1,168 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Deletes the specified capacity provider. +// +// The FARGATE and FARGATE_SPOT capacity providers are reserved and can't be +// deleted. You can disassociate them from a cluster using either [PutCapacityProviderProviders]or by deleting +// the cluster. +// +// Prior to a capacity provider being deleted, the capacity provider must be +// removed from the capacity provider strategy from all services. The [UpdateService]API can be +// used to remove a capacity provider from a service's capacity provider strategy. +// When updating a service, the forceNewDeployment option can be used to ensure +// that any tasks using the Amazon EC2 instance capacity provided by the capacity +// provider are transitioned to use the capacity from the remaining capacity +// providers. Only capacity providers that aren't associated with a cluster can be +// deleted. To remove a capacity provider from a cluster, you can either use [PutCapacityProviderProviders]or +// delete the cluster. 
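A caller-side sketch of the delete call itself (not generated code; the capacity provider name is a placeholder, and the capacity provider strategy changes described above are assumed to have been made already):

func deleteCapacityProvider(ctx context.Context, client *ecs.Client) error {
	_, err := client.DeleteCapacityProvider(ctx, &ecs.DeleteCapacityProviderInput{
		CapacityProvider: aws.String("my-asg-capacity-provider"), // short name or full ARN
	})
	return err
}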
+// +// [PutCapacityProviderProviders]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutCapacityProviderProviders.html +// [UpdateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateService.html +func (c *Client) DeleteCapacityProvider(ctx context.Context, params *DeleteCapacityProviderInput, optFns ...func(*Options)) (*DeleteCapacityProviderOutput, error) { + if params == nil { params = &DeleteCapacityProviderInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DeleteCapacityProvider", params, optFns, c.addOperationDeleteCapacityProviderMiddlewares) + if err != nil { return nil, err } + + out := result.(*DeleteCapacityProviderOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteCapacityProviderInput struct { + + // The short name or full Amazon Resource Name (ARN) of the capacity provider to + // delete. + // + // This member is required. + CapacityProvider *string + + noSmithyDocumentSerde +} + +type DeleteCapacityProviderOutput struct { + + // The details of the capacity provider. + CapacityProvider *types.CapacityProvider + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteCapacityProviderMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteCapacityProvider{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteCapacityProvider{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteCapacityProvider"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeleteCapacityProviderValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteCapacityProvider(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + 
return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteCapacityProvider(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteCapacityProvider", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DeleteCluster.go b/aws-sdk-go-v2/service/ecs/api_op_DeleteCluster.go new file mode 100644 index 00000000000..d8d377e3a8a --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DeleteCluster.go @@ -0,0 +1,160 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Deletes the specified cluster. The cluster transitions to the INACTIVE state. +// Clusters with an INACTIVE status might remain discoverable in your account for +// a period of time. However, this behavior is subject to change in the future. We +// don't recommend that you rely on INACTIVE clusters persisting. +// +// You must deregister all container instances from this cluster before you may +// delete it. You can list the container instances in a cluster with [ListContainerInstances]and +// deregister them with [DeregisterContainerInstance]. +// +// [ListContainerInstances]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListContainerInstances.html +// [DeregisterContainerInstance]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeregisterContainerInstance.html +func (c *Client) DeleteCluster(ctx context.Context, params *DeleteClusterInput, optFns ...func(*Options)) (*DeleteClusterOutput, error) { + if params == nil { params = &DeleteClusterInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DeleteCluster", params, optFns, c.addOperationDeleteClusterMiddlewares) + if err != nil { return nil, err } + + out := result.(*DeleteClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteClusterInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster to delete. + // + // This member is required. + Cluster *string + + noSmithyDocumentSerde +} + +type DeleteClusterOutput struct { + + // The full description of the deleted cluster. + Cluster *types.Cluster + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteCluster{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteCluster{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteCluster"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeleteClusterValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteCluster(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteCluster", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DeleteService.go b/aws-sdk-go-v2/service/ecs/api_op_DeleteService.go new file mode 100644 index 00000000000..a6b89e8272a --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DeleteService.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Deletes a specified service within a cluster. You can delete a service if you +// have no running tasks in it and the desired task count is zero. If the service +// is actively maintaining tasks, you can't delete it, and you must update the +// service to a desired task count of zero. For more information, see [UpdateService]. +// +// When you delete a service, if there are still running tasks that require +// cleanup, the service status moves from ACTIVE to DRAINING , and the service is +// no longer visible in the console or in the [ListServices]API operation. After all tasks have +// transitioned to either STOPPING or STOPPED status, the service status moves +// from DRAINING to INACTIVE . Services in the DRAINING or INACTIVE status can +// still be viewed with the [DescribeServices]API operation. However, in the future, INACTIVE +// services may be cleaned up and purged from Amazon ECS record keeping, and [DescribeServices]calls +// on those services return a ServiceNotFoundException error. +// +// If you attempt to create a new service with the same name as an existing +// service in either ACTIVE or DRAINING status, you receive an error. +// +// [UpdateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateService.html +// [ListServices]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListServices.html +// [DescribeServices]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeServices.html +func (c *Client) DeleteService(ctx context.Context, params *DeleteServiceInput, optFns ...func(*Options)) (*DeleteServiceOutput, error) { + if params == nil { params = &DeleteServiceInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DeleteService", params, optFns, c.addOperationDeleteServiceMiddlewares) + if err != nil { return nil, err } + + out := result.(*DeleteServiceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteServiceInput struct { + + // The name of the service to delete. + // + // This member is required. + Service *string + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // service to delete. If you do not specify a cluster, the default cluster is + // assumed. + Cluster *string + + // If true , allows you to delete a service even if it wasn't scaled down to zero + // tasks. It's only necessary to use this if the service uses the REPLICA + // scheduling strategy. + Force *bool + + noSmithyDocumentSerde +} + +type DeleteServiceOutput struct { + + // The full description of the deleted service. + Service *types.Service + + // Metadata pertaining to the operation's result. 
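An illustrative sketch (not part of the diff) of deleting a service: either scale the desired count to zero first via UpdateService, or pass Force as shown here. Cluster and service names are placeholders; imports match the earlier sketches.

func removeService(ctx context.Context, client *ecs.Client) error {
	out, err := client.DeleteService(ctx, &ecs.DeleteServiceInput{
		Cluster: aws.String("example-cluster"),
		Service: aws.String("example-service"),
		// Force skips the requirement that the service already be scaled to zero tasks.
		Force: aws.Bool(true),
	})
	if err != nil {
		return err
	}
	if out.Service != nil {
		// The service typically reports DRAINING until its tasks stop, then INACTIVE.
		fmt.Println("service status:", aws.ToString(out.Service.Status))
	}
	return nil
}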
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteServiceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteService{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteService{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteService"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeleteServiceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteService(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteService(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteService", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskDefinitions.go b/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskDefinitions.go new file mode 100644 index 00000000000..ef4c61101ac --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskDefinitions.go @@ -0,0 +1,182 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Deletes one or more task definitions. +// +// You must deregister a task definition revision before you delete it. For more +// information, see [DeregisterTaskDefinition]. +// +// When you delete a task definition revision, it is immediately transitions from +// the INACTIVE to DELETE_IN_PROGRESS . Existing tasks and services that reference +// a DELETE_IN_PROGRESS task definition revision continue to run without +// disruption. Existing services that reference a DELETE_IN_PROGRESS task +// definition revision can still scale up or down by modifying the service's +// desired count. +// +// You can't use a DELETE_IN_PROGRESS task definition revision to run new tasks or +// create new services. You also can't update an existing service to reference a +// DELETE_IN_PROGRESS task definition revision. +// +// A task definition revision will stay in DELETE_IN_PROGRESS status until all the +// associated tasks and services have been terminated. +// +// When you delete all INACTIVE task definition revisions, the task definition +// name is not displayed in the console and not returned in the API. If a task +// definition revisions are in the DELETE_IN_PROGRESS state, the task definition +// name is displayed in the console and returned in the API. The task definition +// name is retained by Amazon ECS and the revision is incremented the next time you +// create a task definition with that name. +// +// [DeregisterTaskDefinition]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeregisterTaskDefinition.html +func (c *Client) DeleteTaskDefinitions(ctx context.Context, params *DeleteTaskDefinitionsInput, optFns ...func(*Options)) (*DeleteTaskDefinitionsOutput, error) { + if params == nil { params = &DeleteTaskDefinitionsInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DeleteTaskDefinitions", params, optFns, c.addOperationDeleteTaskDefinitionsMiddlewares) + if err != nil { return nil, err } + + out := result.(*DeleteTaskDefinitionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteTaskDefinitionsInput struct { + + // The family and revision ( family:revision ) or full Amazon Resource Name (ARN) + // of the task definition to delete. You must specify a revision . + // + // You can specify up to 10 task definitions as a comma separated list. + // + // This member is required. + TaskDefinitions []string + + noSmithyDocumentSerde +} + +type DeleteTaskDefinitionsOutput struct { + + // Any failures associated with the call. + Failures []types.Failure + + // The list of deleted task definitions. + TaskDefinitions []types.TaskDefinition + + // Metadata pertaining to the operation's result. 
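A sketch (illustrative, not generated) of deleting a batch of already-deregistered revisions; the family:revision strings are placeholders, and per-item problems surface in Failures rather than as a call error.

func deleteOldRevisions(ctx context.Context, client *ecs.Client) error {
	out, err := client.DeleteTaskDefinitions(ctx, &ecs.DeleteTaskDefinitionsInput{
		TaskDefinitions: []string{"example-family:1", "example-family:2"},
	})
	if err != nil {
		return err
	}
	for _, td := range out.TaskDefinitions {
		fmt.Printf("deleting %s (status %s)\n", aws.ToString(td.TaskDefinitionArn), td.Status)
	}
	for _, f := range out.Failures {
		fmt.Printf("could not delete %s: %s\n", aws.ToString(f.Arn), aws.ToString(f.Reason))
	}
	return nil
}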
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteTaskDefinitionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteTaskDefinitions{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteTaskDefinitions{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteTaskDefinitions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeleteTaskDefinitionsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteTaskDefinitions(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteTaskDefinitions(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteTaskDefinitions", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskSet.go b/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskSet.go new file mode 100644 index 00000000000..e3ae5fd78cc --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskSet.go @@ -0,0 +1,169 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Deletes a specified task set within a service. This is used when a service uses +// the EXTERNAL deployment controller type. For more information, see [Amazon ECS deployment types] in the +// Amazon Elastic Container Service Developer Guide. +// +// [Amazon ECS deployment types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html +func (c *Client) DeleteTaskSet(ctx context.Context, params *DeleteTaskSetInput, optFns ...func(*Options)) (*DeleteTaskSetOutput, error) { + if params == nil { params = &DeleteTaskSetInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DeleteTaskSet", params, optFns, c.addOperationDeleteTaskSetMiddlewares) + if err != nil { return nil, err } + + out := result.(*DeleteTaskSetOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteTaskSetInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // service that the task set found in to delete. + // + // This member is required. + Cluster *string + + // The short name or full Amazon Resource Name (ARN) of the service that hosts the + // task set to delete. + // + // This member is required. + Service *string + + // The task set ID or full Amazon Resource Name (ARN) of the task set to delete. + // + // This member is required. + TaskSet *string + + // If true , you can delete a task set even if it hasn't been scaled down to zero. + Force *bool + + noSmithyDocumentSerde +} + +type DeleteTaskSetOutput struct { + + // Details about the task set. + TaskSet *types.TaskSet + + // Metadata pertaining to the operation's result. 
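Illustrative only: a sketch of removing a task set from a service that uses the EXTERNAL deployment controller; the cluster, service, and task set identifiers are placeholders.

func deleteTaskSet(ctx context.Context, client *ecs.Client) error {
	out, err := client.DeleteTaskSet(ctx, &ecs.DeleteTaskSetInput{
		Cluster: aws.String("example-cluster"),
		Service: aws.String("example-service"),
		TaskSet: aws.String("example-task-set-id"),
		// Force allows deletion even if the task set has not been scaled down to zero.
		Force: aws.Bool(true),
	})
	if err != nil {
		return err
	}
	if out.TaskSet != nil {
		fmt.Println("task set status:", aws.ToString(out.TaskSet.Status))
	}
	return nil
}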
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteTaskSetMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteTaskSet{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteTaskSet{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteTaskSet"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeleteTaskSetValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteTaskSet(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteTaskSet(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteTaskSet", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DeregisterContainerInstance.go b/aws-sdk-go-v2/service/ecs/api_op_DeregisterContainerInstance.go new file mode 100644 index 00000000000..2db1311e6a4 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DeregisterContainerInstance.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Deregisters an Amazon ECS container instance from the specified cluster. This +// instance is no longer available to run tasks. +// +// If you intend to use the container instance for some other purpose after +// deregistration, we recommend that you stop all of the tasks running on the +// container instance before deregistration. That prevents any orphaned tasks from +// consuming resources. +// +// Deregistering a container instance removes the instance from a cluster, but it +// doesn't terminate the EC2 instance. If you are finished using the instance, be +// sure to terminate it in the Amazon EC2 console to stop billing. +// +// If you terminate a running container instance, Amazon ECS automatically +// deregisters the instance from your cluster (stopped container instances or +// instances with disconnected agents aren't automatically deregistered when +// terminated). +func (c *Client) DeregisterContainerInstance(ctx context.Context, params *DeregisterContainerInstanceInput, optFns ...func(*Options)) (*DeregisterContainerInstanceOutput, error) { + if params == nil { params = &DeregisterContainerInstanceInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DeregisterContainerInstance", params, optFns, c.addOperationDeregisterContainerInstanceMiddlewares) + if err != nil { return nil, err } + + out := result.(*DeregisterContainerInstanceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeregisterContainerInstanceInput struct { + + // The container instance ID or full ARN of the container instance to deregister. + // For more information about the ARN format, see [Amazon Resource Name (ARN)]in the Amazon ECS Developer + // Guide. + // + // [Amazon Resource Name (ARN)]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + // + // This member is required. + ContainerInstance *string + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // container instance to deregister. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string + + // Forces the container instance to be deregistered. If you have tasks running on + // the container instance when you deregister it with the force option, these + // tasks remain running until you terminate the instance or the tasks stop through + // some other means, but they're orphaned (no longer monitored or accounted for by + // Amazon ECS). If an orphaned task on your container instance is part of an Amazon + // ECS service, then the service scheduler starts another copy of that task, on a + // different container instance if possible. + // + // Any containers in orphaned service tasks that are registered with a Classic + // Load Balancer or an Application Load Balancer target group are deregistered. + // They begin connection draining according to the settings on the load balancer or + // target group. + Force *bool + + noSmithyDocumentSerde +} + +type DeregisterContainerInstanceOutput struct { + + // The container instance that was deregistered. + ContainerInstance *types.ContainerInstance + + // Metadata pertaining to the operation's result. 
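A sketch, not part of the generated patch: deregistering an instance before terminating it in EC2. The identifiers are placeholders, and Force is set because this example does not drain the instance's tasks first.

func deregisterInstance(ctx context.Context, client *ecs.Client) error {
	out, err := client.DeregisterContainerInstance(ctx, &ecs.DeregisterContainerInstanceInput{
		Cluster:           aws.String("example-cluster"),
		ContainerInstance: aws.String("example-container-instance-id"),
		Force:             aws.Bool(true),
	})
	if err != nil {
		return err
	}
	if out.ContainerInstance != nil {
		fmt.Println("deregistered:", aws.ToString(out.ContainerInstance.ContainerInstanceArn))
	}
	return nil
}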
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeregisterContainerInstanceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeregisterContainerInstance{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeregisterContainerInstance{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeregisterContainerInstance"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeregisterContainerInstanceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeregisterContainerInstance(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeregisterContainerInstance(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeregisterContainerInstance", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DeregisterTaskDefinition.go b/aws-sdk-go-v2/service/ecs/api_op_DeregisterTaskDefinition.go new file mode 100644 index 00000000000..b4465939e7d --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DeregisterTaskDefinition.go @@ -0,0 +1,172 @@ +// Code generated by 
smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Deregisters the specified task definition by family and revision. Upon +// deregistration, the task definition is marked as INACTIVE . Existing tasks and +// services that reference an INACTIVE task definition continue to run without +// disruption. Existing services that reference an INACTIVE task definition can +// still scale up or down by modifying the service's desired count. If you want to +// delete a task definition revision, you must first deregister the task definition +// revision. +// +// You can't use an INACTIVE task definition to run new tasks or create new +// services, and you can't update an existing service to reference an INACTIVE +// task definition. However, there may be up to a 10-minute window following +// deregistration where these restrictions have not yet taken effect. +// +// At this time, INACTIVE task definitions remain discoverable in your account +// indefinitely. However, this behavior is subject to change in the future. We +// don't recommend that you rely on INACTIVE task definitions persisting beyond +// the lifecycle of any associated tasks and services. +// +// You must deregister a task definition revision before you delete it. For more +// information, see [DeleteTaskDefinitions]. +// +// [DeleteTaskDefinitions]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteTaskDefinitions.html +func (c *Client) DeregisterTaskDefinition(ctx context.Context, params *DeregisterTaskDefinitionInput, optFns ...func(*Options)) (*DeregisterTaskDefinitionOutput, error) { + if params == nil { params = &DeregisterTaskDefinitionInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DeregisterTaskDefinition", params, optFns, c.addOperationDeregisterTaskDefinitionMiddlewares) + if err != nil { return nil, err } + + out := result.(*DeregisterTaskDefinitionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeregisterTaskDefinitionInput struct { + + // The family and revision ( family:revision ) or full Amazon Resource Name (ARN) + // of the task definition to deregister. You must specify a revision . + // + // This member is required. + TaskDefinition *string + + noSmithyDocumentSerde +} + +type DeregisterTaskDefinitionOutput struct { + + // The full description of the deregistered task. + TaskDefinition *types.TaskDefinition + + // Metadata pertaining to the operation's result. 
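An illustrative sketch of deregistering a single revision so it can later be deleted with DeleteTaskDefinitions; the family:revision value is a placeholder and imports follow the earlier sketches.

func deregisterRevision(ctx context.Context, client *ecs.Client) error {
	out, err := client.DeregisterTaskDefinition(ctx, &ecs.DeregisterTaskDefinitionInput{
		TaskDefinition: aws.String("example-family:3"),
	})
	if err != nil {
		return err
	}
	if out.TaskDefinition != nil {
		// The revision is expected to come back marked INACTIVE.
		fmt.Printf("%s is now %s\n", aws.ToString(out.TaskDefinition.TaskDefinitionArn), out.TaskDefinition.Status)
	}
	return nil
}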
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeregisterTaskDefinitionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeregisterTaskDefinition{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeregisterTaskDefinition{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeregisterTaskDefinition"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeregisterTaskDefinitionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeregisterTaskDefinition(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeregisterTaskDefinition(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeregisterTaskDefinition", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DescribeCapacityProviders.go b/aws-sdk-go-v2/service/ecs/api_op_DescribeCapacityProviders.go new file mode 100644 index 00000000000..e01d8950eff --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DescribeCapacityProviders.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO 
NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Describes one or more of your capacity providers. +func (c *Client) DescribeCapacityProviders(ctx context.Context, params *DescribeCapacityProvidersInput, optFns ...func(*Options)) (*DescribeCapacityProvidersOutput, error) { + if params == nil { params = &DescribeCapacityProvidersInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DescribeCapacityProviders", params, optFns, c.addOperationDescribeCapacityProvidersMiddlewares) + if err != nil { return nil, err } + + out := result.(*DescribeCapacityProvidersOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeCapacityProvidersInput struct { + + // The short name or full Amazon Resource Name (ARN) of one or more capacity + // providers. Up to 100 capacity providers can be described in an action. + CapacityProviders []string + + // Specifies whether or not you want to see the resource tags for the capacity + // provider. If TAGS is specified, the tags are included in the response. If this + // field is omitted, tags aren't included in the response. + Include []types.CapacityProviderField + + // The maximum number of account setting results returned by + // DescribeCapacityProviders in paginated output. When this parameter is used, + // DescribeCapacityProviders only returns maxResults results in a single page + // along with a nextToken response element. The remaining results of the initial + // request can be seen by sending another DescribeCapacityProviders request with + // the returned nextToken value. This value can be between 1 and 10. If this + // parameter is not used, then DescribeCapacityProviders returns up to 10 results + // and a nextToken value if applicable. + MaxResults *int32 + + // The nextToken value returned from a previous paginated DescribeCapacityProviders + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + noSmithyDocumentSerde +} + +type DescribeCapacityProvidersOutput struct { + + // The list of capacity providers. + CapacityProviders []types.CapacityProvider + + // Any failures associated with the call. + Failures []types.Failure + + // The nextToken value to include in a future DescribeCapacityProviders request. + // When the results of a DescribeCapacityProviders request exceed maxResults , this + // value can be used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string + + // Metadata pertaining to the operation's result. 
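Because this operation pages with maxResults/nextToken, a sketch (not generated code) of walking the token-based pages by hand; the TAGS include value is assumed to be exposed as types.CapacityProviderFieldTags in this module's types package, which must be imported as github.com/aws/aws-sdk-go-v2/service/ecs/types.

func allCapacityProviders(ctx context.Context, client *ecs.Client) ([]types.CapacityProvider, error) {
	var providers []types.CapacityProvider
	var nextToken *string
	for {
		out, err := client.DescribeCapacityProviders(ctx, &ecs.DescribeCapacityProvidersInput{
			Include:   []types.CapacityProviderField{types.CapacityProviderFieldTags},
			NextToken: nextToken,
		})
		if err != nil {
			return nil, err
		}
		providers = append(providers, out.CapacityProviders...)
		if out.NextToken == nil {
			return providers, nil
		}
		// Continue from where the previous page ended.
		nextToken = out.NextToken
	}
}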
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeCapacityProvidersMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeCapacityProviders{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeCapacityProviders{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeCapacityProviders"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCapacityProviders(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeCapacityProviders(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeCapacityProviders", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DescribeClusters.go b/aws-sdk-go-v2/service/ecs/api_op_DescribeClusters.go new file mode 100644 index 00000000000..2fc8b0de29c --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DescribeClusters.go @@ -0,0 +1,166 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Describes one or more of your clusters. +func (c *Client) DescribeClusters(ctx context.Context, params *DescribeClustersInput, optFns ...func(*Options)) (*DescribeClustersOutput, error) { + if params == nil { params = &DescribeClustersInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DescribeClusters", params, optFns, c.addOperationDescribeClustersMiddlewares) + if err != nil { return nil, err } + + out := result.(*DescribeClustersOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeClustersInput struct { + + // A list of up to 100 cluster names or full cluster Amazon Resource Name (ARN) + // entries. If you do not specify a cluster, the default cluster is assumed. + Clusters []string + + // Determines whether to include additional information about the clusters in the + // response. If this field is omitted, this information isn't included. + // + // If ATTACHMENTS is specified, the attachments for the container instances or + // tasks within the cluster are included, for example the capacity providers. + // + // If SETTINGS is specified, the settings for the cluster are included. + // + // If CONFIGURATIONS is specified, the configuration for the cluster is included. + // + // If STATISTICS is specified, the task and service count is included, separated + // by launch type. + // + // If TAGS is specified, the metadata tags associated with the cluster are + // included. + Include []types.ClusterField + + noSmithyDocumentSerde +} + +type DescribeClustersOutput struct { + + // The list of clusters. + Clusters []types.Cluster + + // Any failures associated with the call. + Failures []types.Failure + + // Metadata pertaining to the operation's result. 
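A usage sketch (not generated): describing a placeholder cluster with per-launch-type statistics and tags included. Failures is checked because clusters that can't be found are reported there rather than as an error.

func showCluster(ctx context.Context, client *ecs.Client) error {
	out, err := client.DescribeClusters(ctx, &ecs.DescribeClustersInput{
		Clusters: []string{"example-cluster"},
		Include:  []types.ClusterField{types.ClusterFieldStatistics, types.ClusterFieldTags},
	})
	if err != nil {
		return err
	}
	for _, c := range out.Clusters {
		fmt.Printf("%s: %s, %d running tasks\n", aws.ToString(c.ClusterName), aws.ToString(c.Status), c.RunningTasksCount)
	}
	for _, f := range out.Failures {
		fmt.Printf("failure for %s: %s\n", aws.ToString(f.Arn), aws.ToString(f.Reason))
	}
	return nil
}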
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeClustersMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeClusters{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeClusters{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeClusters"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeClusters(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeClusters(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeClusters", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DescribeContainerInstances.go b/aws-sdk-go-v2/service/ecs/api_op_DescribeContainerInstances.go new file mode 100644 index 00000000000..84724bf5333 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DescribeContainerInstances.go @@ -0,0 +1,169 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Describes one or more container instances. Returns metadata about each +// container instance requested. +func (c *Client) DescribeContainerInstances(ctx context.Context, params *DescribeContainerInstancesInput, optFns ...func(*Options)) (*DescribeContainerInstancesOutput, error) { + if params == nil { params = &DescribeContainerInstancesInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DescribeContainerInstances", params, optFns, c.addOperationDescribeContainerInstancesMiddlewares) + if err != nil { return nil, err } + + out := result.(*DescribeContainerInstancesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeContainerInstancesInput struct { + + // A list of up to 100 container instance IDs or full Amazon Resource Name (ARN) + // entries. + // + // This member is required. + ContainerInstances []string + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // container instances to describe. If you do not specify a cluster, the default + // cluster is assumed. This parameter is required if the container instance or + // container instances you are describing were launched in any cluster other than + // the default cluster. + Cluster *string + + // Specifies whether you want to see the resource tags for the container instance. + // If TAGS is specified, the tags are included in the response. If + // CONTAINER_INSTANCE_HEALTH is specified, the container instance health is + // included in the response. If this field is omitted, tags and container instance + // health status aren't included in the response. + Include []types.ContainerInstanceField + + noSmithyDocumentSerde +} + +type DescribeContainerInstancesOutput struct { + + // The list of container instances. + ContainerInstances []types.ContainerInstance + + // Any failures associated with the call. + Failures []types.Failure + + // Metadata pertaining to the operation's result. 
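Illustrative only: looking up one placeholder container instance with health information included; the CONTAINER_INSTANCE_HEALTH enum is assumed to be exposed as types.ContainerInstanceFieldContainerInstanceHealth in this module's types package.

func showContainerInstance(ctx context.Context, client *ecs.Client) error {
	out, err := client.DescribeContainerInstances(ctx, &ecs.DescribeContainerInstancesInput{
		Cluster:            aws.String("example-cluster"),
		ContainerInstances: []string{"example-container-instance-id"},
		Include:            []types.ContainerInstanceField{types.ContainerInstanceFieldContainerInstanceHealth},
	})
	if err != nil {
		return err
	}
	for _, ci := range out.ContainerInstances {
		fmt.Printf("%s: %s, %d running tasks\n", aws.ToString(ci.Ec2InstanceId), aws.ToString(ci.Status), ci.RunningTasksCount)
	}
	return nil
}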
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeContainerInstancesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeContainerInstances{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeContainerInstances{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeContainerInstances"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDescribeContainerInstancesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeContainerInstances(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeContainerInstances(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeContainerInstances", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DescribeServices.go b/aws-sdk-go-v2/service/ecs/api_op_DescribeServices.go new file mode 100644 index 00000000000..f9255bd897f --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DescribeServices.go @@ -0,0 +1,626 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + jmespath "github.com/jmespath/go-jmespath" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithytime "github.com/aws/smithy-go/time" + smithywaiter "github.com/aws/smithy-go/waiter" + "strconv" + "time" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Describes the specified services running in your cluster. +func (c *Client) DescribeServices(ctx context.Context, params *DescribeServicesInput, optFns ...func(*Options)) (*DescribeServicesOutput, error) { + if params == nil { params = &DescribeServicesInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DescribeServices", params, optFns, c.addOperationDescribeServicesMiddlewares) + if err != nil { return nil, err } + + out := result.(*DescribeServicesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeServicesInput struct { + + // A list of services to describe. You may specify up to 10 services to describe + // in a single operation. + // + // This member is required. + Services []string + + // The short name or full Amazon Resource Name (ARN)the cluster that hosts the + // service to describe. If you do not specify a cluster, the default cluster is + // assumed. This parameter is required if the service or services you are + // describing were launched in any cluster other than the default cluster. + Cluster *string + + // Determines whether you want to see the resource tags for the service. If TAGS + // is specified, the tags are included in the response. If this field is omitted, + // tags aren't included in the response. + Include []types.ServiceField + + noSmithyDocumentSerde +} + +type DescribeServicesOutput struct { + + // Any failures associated with the call. + Failures []types.Failure + + // The list of services described. + Services []types.Service + + // Metadata pertaining to the operation's result. 
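A sketch (not part of the generated output) of describing a placeholder service, with up to 10 services allowed per call and tags included; imports follow the earlier sketches.

func showServices(ctx context.Context, client *ecs.Client) error {
	out, err := client.DescribeServices(ctx, &ecs.DescribeServicesInput{
		Cluster:  aws.String("example-cluster"),
		Services: []string{"example-service"},
		Include:  []types.ServiceField{types.ServiceFieldTags},
	})
	if err != nil {
		return err
	}
	for _, s := range out.Services {
		fmt.Printf("%s: %s, desired=%d running=%d\n",
			aws.ToString(s.ServiceName), aws.ToString(s.Status), s.DesiredCount, s.RunningCount)
	}
	return nil
}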
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeServicesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeServices{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeServices{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeServices"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDescribeServicesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeServices(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ServicesInactiveWaiterOptions are waiter options for ServicesInactiveWaiter +type ServicesInactiveWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. 
+ // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // ServicesInactiveWaiter will use default minimum delay of 15 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, ServicesInactiveWaiter will use default max delay of 120 seconds. + // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *DescribeServicesInput, *DescribeServicesOutput, error) (bool, error) +} + +// ServicesInactiveWaiter defines the waiters for ServicesInactive +type ServicesInactiveWaiter struct { + + client DescribeServicesAPIClient + + options ServicesInactiveWaiterOptions +} + +// NewServicesInactiveWaiter constructs a ServicesInactiveWaiter. +func NewServicesInactiveWaiter(client DescribeServicesAPIClient, optFns ...func(*ServicesInactiveWaiterOptions)) *ServicesInactiveWaiter { + options := ServicesInactiveWaiterOptions{} + options.MinDelay = 15 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = servicesInactiveStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &ServicesInactiveWaiter { + client: client, + options: options, + } +} + +// Wait calls the waiter function for ServicesInactive waiter. The maxWaitDur is +// the maximum wait duration the waiter will wait. The maxWaitDur is required and +// must be greater than zero. +func (w *ServicesInactiveWaiter) Wait(ctx context.Context, params *DescribeServicesInput, maxWaitDur time.Duration, optFns ...func(*ServicesInactiveWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for ServicesInactive waiter and returns +// the output of the successful operation. The maxWaitDur is the maximum wait +// duration the waiter will wait. The maxWaitDur is required and must be greater +// than zero. 
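+//
+// For illustration only, a minimal usage sketch of this waiter. It assumes the
+// aws, ecs, and time packages are imported, cfg is an already-loaded aws.Config,
+// ctx is a context.Context, and the cluster and service names are placeholders:
+//
+//	client := ecs.NewFromConfig(cfg)
+//	waiter := ecs.NewServicesInactiveWaiter(client)
+//	err := waiter.Wait(ctx, &ecs.DescribeServicesInput{
+//		Cluster:  aws.String("example-cluster"),  // placeholder
+//		Services: []string{"example-service"},    // placeholder
+//	}, 15*time.Minute)                            // maxWaitDur must be > 0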
+func (w *ServicesInactiveWaiter) WaitForOutput(ctx context.Context, params *DescribeServicesInput, maxWaitDur time.Duration, optFns ...func(*ServicesInactiveWaiterOptions)) (*DescribeServicesOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.DescribeServices(ctx, params, func (o *Options) { + baseOpts := []func(*Options) { + addIsWaiterUserAgent, + } + o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { return nil, err } + if !retryable { return out, nil } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { return nil, fmt.Errorf("error computing waiter delay, %w", err)} + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for ServicesInactive waiter") +} + +func servicesInactiveStateRetryable(ctx context.Context, input *DescribeServicesInput, output *DescribeServicesOutput, err error) (bool, error) { + + if err == nil { + pathValue, err := jmespath.Search("failures[].reason", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "MISSING" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue)} + + if string(*value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + if err == nil { + pathValue, err := jmespath.Search("services[].status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "INACTIVE" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue)} + + if string(*value) == expectedValue { 
+ return false, nil + } + } + } + + return true, nil +} + +// ServicesStableWaiterOptions are waiter options for ServicesStableWaiter +type ServicesStableWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // ServicesStableWaiter will use default minimum delay of 15 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, ServicesStableWaiter will use default max delay of 120 seconds. + // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *DescribeServicesInput, *DescribeServicesOutput, error) (bool, error) +} + +// ServicesStableWaiter defines the waiters for ServicesStable +type ServicesStableWaiter struct { + + client DescribeServicesAPIClient + + options ServicesStableWaiterOptions +} + +// NewServicesStableWaiter constructs a ServicesStableWaiter. +func NewServicesStableWaiter(client DescribeServicesAPIClient, optFns ...func(*ServicesStableWaiterOptions)) *ServicesStableWaiter { + options := ServicesStableWaiterOptions{} + options.MinDelay = 15 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = servicesStableStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &ServicesStableWaiter { + client: client, + options: options, + } +} + +// Wait calls the waiter function for ServicesStable waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *ServicesStableWaiter) Wait(ctx context.Context, params *DescribeServicesInput, maxWaitDur time.Duration, optFns ...func(*ServicesStableWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for ServicesStable waiter and returns +// the output of the successful operation. 
The maxWaitDur is the maximum wait +// duration the waiter will wait. The maxWaitDur is required and must be greater +// than zero. +func (w *ServicesStableWaiter) WaitForOutput(ctx context.Context, params *DescribeServicesInput, maxWaitDur time.Duration, optFns ...func(*ServicesStableWaiterOptions)) (*DescribeServicesOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.DescribeServices(ctx, params, func (o *Options) { + baseOpts := []func(*Options) { + addIsWaiterUserAgent, + } + o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { return nil, err } + if !retryable { return out, nil } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { return nil, fmt.Errorf("error computing waiter delay, %w", err)} + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for ServicesStable waiter") +} + +func servicesStableStateRetryable(ctx context.Context, input *DescribeServicesInput, output *DescribeServicesOutput, err error) (bool, error) { + + if err == nil { + pathValue, err := jmespath.Search("failures[].reason", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "MISSING" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue)} + + if string(*value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + if err == nil { + pathValue, err := jmespath.Search("services[].status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "DRAINING" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return 
false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue)} + + if string(*value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + if err == nil { + pathValue, err := jmespath.Search("services[].status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "INACTIVE" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue)} + + if string(*value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + if err == nil { + pathValue, err := jmespath.Search("length(services[?!(length(deployments) == `1` && runningCount == desiredCount)]) == `0`", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "true" + bv, err := strconv.ParseBool(expectedValue) + if err != nil { return false, fmt.Errorf("error parsing boolean from string %w", err)} + value, ok := pathValue.(bool) + if !ok { + return false, fmt.Errorf("waiter comparator expected bool value got %T", pathValue) + } + + if value == bv { + return false, nil + } + } + + return true, nil +} + +// DescribeServicesAPIClient is a client that implements the DescribeServices +// operation. +type DescribeServicesAPIClient interface { + DescribeServices(context.Context, *DescribeServicesInput, ...func(*Options)) (*DescribeServicesOutput, error) +} + +var _ DescribeServicesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opDescribeServices(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeServices", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskDefinition.go b/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskDefinition.go new file mode 100644 index 00000000000..28ce41a1446 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskDefinition.go @@ -0,0 +1,190 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Describes a task definition. You can specify a family and revision to find +// information about a specific task definition, or you can simply specify the +// family to find the latest ACTIVE revision in that family. +// +// You can only describe INACTIVE task definitions while an active task or service +// references them. 
+func (c *Client) DescribeTaskDefinition(ctx context.Context, params *DescribeTaskDefinitionInput, optFns ...func(*Options)) (*DescribeTaskDefinitionOutput, error) { + if params == nil { params = &DescribeTaskDefinitionInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DescribeTaskDefinition", params, optFns, c.addOperationDescribeTaskDefinitionMiddlewares) + if err != nil { return nil, err } + + out := result.(*DescribeTaskDefinitionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeTaskDefinitionInput struct { + + // The family for the latest ACTIVE revision, family and revision ( family:revision + // ) for a specific revision in the family, or full Amazon Resource Name (ARN) of + // the task definition to describe. + // + // This member is required. + TaskDefinition *string + + // Determines whether to see the resource tags for the task definition. If TAGS is + // specified, the tags are included in the response. If this field is omitted, tags + // aren't included in the response. + Include []types.TaskDefinitionField + + noSmithyDocumentSerde +} + +type DescribeTaskDefinitionOutput struct { + + // The metadata that's applied to the task definition to help you categorize and + // organize them. Each tag consists of a key and an optional value. You define + // both. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []types.Tag + + // The full task definition description. + TaskDefinition *types.TaskDefinition + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeTaskDefinitionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeTaskDefinition{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeTaskDefinition{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeTaskDefinition"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDescribeTaskDefinitionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTaskDefinition(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeTaskDefinition(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeTaskDefinition", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskSets.go b/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskSets.go new file mode 100644 index 00000000000..ca769ba62a4 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskSets.go @@ -0,0 +1,172 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Describes the task sets in the specified cluster and service. This is used when +// a service uses the EXTERNAL deployment controller type. For more information, +// see [Amazon ECS Deployment Types]in the Amazon Elastic Container Service Developer Guide. +// +// [Amazon ECS Deployment Types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html +func (c *Client) DescribeTaskSets(ctx context.Context, params *DescribeTaskSetsInput, optFns ...func(*Options)) (*DescribeTaskSetsOutput, error) { + if params == nil { params = &DescribeTaskSetsInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DescribeTaskSets", params, optFns, c.addOperationDescribeTaskSetsMiddlewares) + if err != nil { return nil, err } + + out := result.(*DescribeTaskSetsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeTaskSetsInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // service that the task sets exist in. + // + // This member is required. + Cluster *string + + // The short name or full Amazon Resource Name (ARN) of the service that the task + // sets exist in. + // + // This member is required. + Service *string + + // Specifies whether to see the resource tags for the task set. If TAGS is + // specified, the tags are included in the response. If this field is omitted, tags + // aren't included in the response. + Include []types.TaskSetField + + // The ID or full Amazon Resource Name (ARN) of task sets to describe. + TaskSets []string + + noSmithyDocumentSerde +} + +type DescribeTaskSetsOutput struct { + + // Any failures associated with the call. + Failures []types.Failure + + // The list of task sets described. + TaskSets []types.TaskSet + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeTaskSetsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeTaskSets{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeTaskSets{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeTaskSets"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDescribeTaskSetsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTaskSets(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeTaskSets(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeTaskSets", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DescribeTasks.go b/aws-sdk-go-v2/service/ecs/api_op_DescribeTasks.go new file mode 100644 index 00000000000..891ea178f51 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DescribeTasks.go @@ -0,0 +1,593 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + jmespath "github.com/jmespath/go-jmespath" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithytime "github.com/aws/smithy-go/time" + smithywaiter "github.com/aws/smithy-go/waiter" + "time" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Describes a specified task or tasks. +// +// Currently, stopped tasks appear in the returned results for at least one hour. +// +// If you have tasks with tags, and then delete the cluster, the tagged tasks are +// returned in the response. If you create a new cluster with the same name as the +// deleted cluster, the tagged tasks are not included in the response. +func (c *Client) DescribeTasks(ctx context.Context, params *DescribeTasksInput, optFns ...func(*Options)) (*DescribeTasksOutput, error) { + if params == nil { params = &DescribeTasksInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DescribeTasks", params, optFns, c.addOperationDescribeTasksMiddlewares) + if err != nil { return nil, err } + + out := result.(*DescribeTasksOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeTasksInput struct { + + // A list of up to 100 task IDs or full ARN entries. + // + // This member is required. + Tasks []string + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // task or tasks to describe. If you do not specify a cluster, the default cluster + // is assumed. This parameter is required if the task or tasks you are describing + // were launched in any cluster other than the default cluster. + Cluster *string + + // Specifies whether you want to see the resource tags for the task. If TAGS is + // specified, the tags are included in the response. If this field is omitted, tags + // aren't included in the response. + Include []types.TaskField + + noSmithyDocumentSerde +} + +type DescribeTasksOutput struct { + + // Any failures associated with the call. + Failures []types.Failure + + // The list of tasks. + Tasks []types.Task + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeTasksMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeTasks{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeTasks{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeTasks"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDescribeTasksValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTasks(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// TasksRunningWaiterOptions are waiter options for TasksRunningWaiter +type TasksRunningWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. 
+ // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // TasksRunningWaiter will use default minimum delay of 6 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, TasksRunningWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *DescribeTasksInput, *DescribeTasksOutput, error) (bool, error) +} + +// TasksRunningWaiter defines the waiters for TasksRunning +type TasksRunningWaiter struct { + + client DescribeTasksAPIClient + + options TasksRunningWaiterOptions +} + +// NewTasksRunningWaiter constructs a TasksRunningWaiter. +func NewTasksRunningWaiter(client DescribeTasksAPIClient, optFns ...func(*TasksRunningWaiterOptions)) *TasksRunningWaiter { + options := TasksRunningWaiterOptions{} + options.MinDelay = 6 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = tasksRunningStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &TasksRunningWaiter { + client: client, + options: options, + } +} + +// Wait calls the waiter function for TasksRunning waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *TasksRunningWaiter) Wait(ctx context.Context, params *DescribeTasksInput, maxWaitDur time.Duration, optFns ...func(*TasksRunningWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for TasksRunning waiter and returns the +// output of the successful operation. The maxWaitDur is the maximum wait duration +// the waiter will wait. The maxWaitDur is required and must be greater than zero. 
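+//
+// For illustration, a minimal sketch that waits for placeholder tasks to reach
+// RUNNING and then uses the describe output; it assumes a configured ecs.Client
+// (client) and imported aws and time packages:
+//
+//	waiter := ecs.NewTasksRunningWaiter(client)
+//	out, err := waiter.WaitForOutput(ctx, &ecs.DescribeTasksInput{
+//		Cluster: aws.String("example-cluster"),       // placeholder
+//		Tasks:   []string{"example-task-id"},         // placeholder task ID or ARN
+//	}, 10*time.Minute)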
+func (w *TasksRunningWaiter) WaitForOutput(ctx context.Context, params *DescribeTasksInput, maxWaitDur time.Duration, optFns ...func(*TasksRunningWaiterOptions)) (*DescribeTasksOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.DescribeTasks(ctx, params, func (o *Options) { + baseOpts := []func(*Options) { + addIsWaiterUserAgent, + } + o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { return nil, err } + if !retryable { return out, nil } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { return nil, fmt.Errorf("error computing waiter delay, %w", err)} + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for TasksRunning waiter") +} + +func tasksRunningStateRetryable(ctx context.Context, input *DescribeTasksInput, output *DescribeTasksOutput, err error) (bool, error) { + + if err == nil { + pathValue, err := jmespath.Search("tasks[].lastStatus", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "STOPPED" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue)} + + if string(*value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + if err == nil { + pathValue, err := jmespath.Search("failures[].reason", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "MISSING" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue)} + + if string(*value) == expectedValue { + return false, 
fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + if err == nil { + pathValue, err := jmespath.Search("tasks[].lastStatus", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "RUNNING" + var match = true + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + if len(listOfValues) == 0 { match = false } + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue)} + + if string(*value) != expectedValue { match = false } + } + + if match { + return false, nil + } + } + + return true, nil +} + +// TasksStoppedWaiterOptions are waiter options for TasksStoppedWaiter +type TasksStoppedWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // TasksStoppedWaiter will use default minimum delay of 6 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, TasksStoppedWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *DescribeTasksInput, *DescribeTasksOutput, error) (bool, error) +} + +// TasksStoppedWaiter defines the waiters for TasksStopped +type TasksStoppedWaiter struct { + + client DescribeTasksAPIClient + + options TasksStoppedWaiterOptions +} + +// NewTasksStoppedWaiter constructs a TasksStoppedWaiter. 
+func NewTasksStoppedWaiter(client DescribeTasksAPIClient, optFns ...func(*TasksStoppedWaiterOptions)) *TasksStoppedWaiter { + options := TasksStoppedWaiterOptions{} + options.MinDelay = 6 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = tasksStoppedStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &TasksStoppedWaiter { + client: client, + options: options, + } +} + +// Wait calls the waiter function for TasksStopped waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *TasksStoppedWaiter) Wait(ctx context.Context, params *DescribeTasksInput, maxWaitDur time.Duration, optFns ...func(*TasksStoppedWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for TasksStopped waiter and returns the +// output of the successful operation. The maxWaitDur is the maximum wait duration +// the waiter will wait. The maxWaitDur is required and must be greater than zero. +func (w *TasksStoppedWaiter) WaitForOutput(ctx context.Context, params *DescribeTasksInput, maxWaitDur time.Duration, optFns ...func(*TasksStoppedWaiterOptions)) (*DescribeTasksOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.DescribeTasks(ctx, params, func (o *Options) { + baseOpts := []func(*Options) { + addIsWaiterUserAgent, + } + o.APIOptions = append(o.APIOptions, apiOptions...) 
+ for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { return nil, err } + if !retryable { return out, nil } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { return nil, fmt.Errorf("error computing waiter delay, %w", err)} + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for TasksStopped waiter") +} + +func tasksStoppedStateRetryable(ctx context.Context, input *DescribeTasksInput, output *DescribeTasksOutput, err error) (bool, error) { + + if err == nil { + pathValue, err := jmespath.Search("tasks[].lastStatus", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "STOPPED" + var match = true + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + if len(listOfValues) == 0 { match = false } + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue)} + + if string(*value) != expectedValue { match = false } + } + + if match { + return false, nil + } + } + + return true, nil +} + +// DescribeTasksAPIClient is a client that implements the DescribeTasks operation. +type DescribeTasksAPIClient interface { + DescribeTasks(context.Context, *DescribeTasksInput, ...func(*Options)) (*DescribeTasksOutput, error) +} + +var _ DescribeTasksAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opDescribeTasks(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeTasks", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_DiscoverPollEndpoint.go b/aws-sdk-go-v2/service/ecs/api_op_DiscoverPollEndpoint.go new file mode 100644 index 00000000000..92b587bd585 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_DiscoverPollEndpoint.go @@ -0,0 +1,170 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This action is only used by the Amazon ECS agent, and it is not intended for +// use outside of the agent. +// +// Returns an endpoint for the Amazon ECS agent to poll for updates. 
+func (c *Client) DiscoverPollEndpoint(ctx context.Context, params *DiscoverPollEndpointInput, optFns ...func(*Options)) (*DiscoverPollEndpointOutput, error) { + if params == nil { params = &DiscoverPollEndpointInput{} } + + result, metadata, err := c.invokeOperation(ctx, "DiscoverPollEndpoint", params, optFns, c.addOperationDiscoverPollEndpointMiddlewares) + if err != nil { return nil, err } + + out := result.(*DiscoverPollEndpointOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DiscoverPollEndpointInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster that the + // container instance belongs to. + Cluster *string + + // The container instance ID or full ARN of the container instance. For more + // information about the ARN format, see [Amazon Resource Name (ARN)]in the Amazon ECS Developer Guide. + // + // [Amazon Resource Name (ARN)]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + ContainerInstance *string + + // The availability zone of the container instance. This field is optional. + ZoneId *string + + noSmithyDocumentSerde +} + +type DiscoverPollEndpointOutput struct { + + // The endpoint for the Amazon ECS agent to poll. + Endpoint *string + + // The endpoint for the Amazon ECS agent to poll for Service Connect + // configuration. For more information, see [Service Connect]in the Amazon Elastic Container + // Service Developer Guide. + // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + ServiceConnectEndpoint *string + + // The system logs endpoint for the Amazon ECS agent. + SystemLogsEndpoint *string + + // The telemetry endpoint for the Amazon ECS agent. + TelemetryEndpoint *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDiscoverPollEndpointMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDiscoverPollEndpoint{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDiscoverPollEndpoint{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DiscoverPollEndpoint"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDiscoverPollEndpoint(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDiscoverPollEndpoint(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DiscoverPollEndpoint", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_ExecuteCommand.go b/aws-sdk-go-v2/service/ecs/api_op_ExecuteCommand.go new file mode 100644 index 00000000000..682c576374d --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_ExecuteCommand.go @@ -0,0 +1,196 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Runs a command remotely on a container within a task. +// +// If you use a condition key in your IAM policy to refine the conditions for the +// policy statement, for example limit the actions to a specific cluster, you +// receive an AccessDeniedException when there is a mismatch between the condition +// key value and the corresponding parameter value. +// +// For information about required permissions and considerations, see [Using Amazon ECS Exec for debugging] in the +// Amazon ECS Developer Guide. +// +// [Using Amazon ECS Exec for debugging]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-exec.html +func (c *Client) ExecuteCommand(ctx context.Context, params *ExecuteCommandInput, optFns ...func(*Options)) (*ExecuteCommandOutput, error) { + if params == nil { params = &ExecuteCommandInput{} } + + result, metadata, err := c.invokeOperation(ctx, "ExecuteCommand", params, optFns, c.addOperationExecuteCommandMiddlewares) + if err != nil { return nil, err } + + out := result.(*ExecuteCommandOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ExecuteCommandInput struct { + + // The command to run on the container. + // + // This member is required. + Command *string + + // Use this flag to run your command in interactive mode. + // + // This member is required. + Interactive bool + + // The Amazon Resource Name (ARN) or ID of the task the container is part of. + // + // This member is required. + Task *string + + // The Amazon Resource Name (ARN) or short name of the cluster the task is running + // in. If you do not specify a cluster, the default cluster is assumed. + Cluster *string + + // The name of the container to execute the command on. A container name only + // needs to be specified for tasks containing multiple containers. + Container *string + + noSmithyDocumentSerde +} + +type ExecuteCommandOutput struct { + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string + + // The Amazon Resource Name (ARN) of the container. + ContainerArn *string + + // The name of the container. + ContainerName *string + + // Determines whether the execute command session is running in interactive mode. + // Amazon ECS only supports initiating interactive sessions, so you must specify + // true for this value. + Interactive bool + + // The details of the SSM session that was created for this instance of + // execute-command. + Session *types.Session + + // The Amazon Resource Name (ARN) of the task. + TaskArn *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationExecuteCommandMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpExecuteCommand{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpExecuteCommand{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ExecuteCommand"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpExecuteCommandValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opExecuteCommand(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opExecuteCommand(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ExecuteCommand", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_GetTaskProtection.go b/aws-sdk-go-v2/service/ecs/api_op_GetTaskProtection.go new file mode 100644 index 00000000000..8822fe41a5d --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_GetTaskProtection.go @@ -0,0 +1,165 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
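// Usage sketch (illustrative only; not part of the generated patch): invoking
// ExecuteCommand from application code. It assumes a configured *ecs.Client
// (for example client := ecs.NewFromConfig(cfg)) plus imports of context, fmt,
// "github.com/aws/aws-sdk-go-v2/aws", this ecs package and ecs/types; the
// cluster, task and container names are placeholders. The returned Session is
// normally handed to the SSM session-manager plugin rather than used directly.
func exampleExecuteCommand(ctx context.Context, client *ecs.Client) error {
	out, err := client.ExecuteCommand(ctx, &ecs.ExecuteCommandInput{
		Cluster:     aws.String("my-cluster"), // optional; default cluster assumed if omitted
		Task:        aws.String("my-task-id"), // required: task ID or full ARN
		Container:   aws.String("app"),        // only needed when the task runs several containers
		Command:     aws.String("/bin/sh"),    // required
		Interactive: true,                     // required; ECS only supports interactive sessions
	})
	if err != nil {
		return err
	}
	if out.Session != nil {
		fmt.Println("SSM session started:", aws.ToString(out.Session.SessionId))
	}
	return nil
}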
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Retrieves the protection status of tasks in an Amazon ECS service. +func (c *Client) GetTaskProtection(ctx context.Context, params *GetTaskProtectionInput, optFns ...func(*Options)) (*GetTaskProtectionOutput, error) { + if params == nil { params = &GetTaskProtectionInput{} } + + result, metadata, err := c.invokeOperation(ctx, "GetTaskProtection", params, optFns, c.addOperationGetTaskProtectionMiddlewares) + if err != nil { return nil, err } + + out := result.(*GetTaskProtectionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetTaskProtectionInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // service that the task sets exist in. + // + // This member is required. + Cluster *string + + // A list of up to 100 task IDs or full ARN entries. + Tasks []string + + noSmithyDocumentSerde +} + +type GetTaskProtectionOutput struct { + + // Any failures associated with the call. + Failures []types.Failure + + // A list of tasks with the following information. + // + // - taskArn : The task ARN. + // + // - protectionEnabled : The protection status of the task. If scale-in + // protection is turned on for a task, the value is true . Otherwise, it is false + // . + // + // - expirationDate : The epoch time when protection for the task will expire. + ProtectedTasks []types.ProtectedTask + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetTaskProtectionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetTaskProtection{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetTaskProtection{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetTaskProtection"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err 
!= nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGetTaskProtectionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetTaskProtection(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetTaskProtection(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetTaskProtection", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_ListAccountSettings.go b/aws-sdk-go-v2/service/ecs/api_op_ListAccountSettings.go new file mode 100644 index 00000000000..e1efcb94884 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_ListAccountSettings.go @@ -0,0 +1,285 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Lists the account settings for a specified principal. +func (c *Client) ListAccountSettings(ctx context.Context, params *ListAccountSettingsInput, optFns ...func(*Options)) (*ListAccountSettingsOutput, error) { + if params == nil { params = &ListAccountSettingsInput{} } + + result, metadata, err := c.invokeOperation(ctx, "ListAccountSettings", params, optFns, c.addOperationListAccountSettingsMiddlewares) + if err != nil { return nil, err } + + out := result.(*ListAccountSettingsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAccountSettingsInput struct { + + // Determines whether to return the effective settings. If true , the account + // settings for the root user or the default setting for the principalArn are + // returned. If false , the account settings for the principalArn are returned if + // they're set. Otherwise, no account settings are returned. + EffectiveSettings bool + + // The maximum number of account setting results returned by ListAccountSettings + // in paginated output. When this parameter is used, ListAccountSettings only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListAccountSettings request with the returned nextToken value. This + // value can be between 1 and 10. If this parameter isn't used, then + // ListAccountSettings returns up to 10 results and a nextToken value if + // applicable. + MaxResults int32 + + // The name of the account setting you want to list the settings for. 
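// Usage sketch (illustrative only; same client and import assumptions as the
// ExecuteCommand sketch above): checking scale-in protection for a batch of
// tasks with GetTaskProtection. The cluster name and task ARNs are placeholders.
func exampleGetTaskProtection(ctx context.Context, client *ecs.Client, taskARNs []string) error {
	out, err := client.GetTaskProtection(ctx, &ecs.GetTaskProtectionInput{
		Cluster: aws.String("my-cluster"), // required
		Tasks:   taskARNs,                 // up to 100 task IDs or full ARNs
	})
	if err != nil {
		return err
	}
	for _, pt := range out.ProtectedTasks {
		fmt.Printf("%s protectionEnabled=%v\n", aws.ToString(pt.TaskArn), pt.ProtectionEnabled)
	}
	for _, f := range out.Failures {
		fmt.Printf("failure for %s: %s\n", aws.ToString(f.Arn), aws.ToString(f.Reason))
	}
	return nil
}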
+ Name types.SettingName + + // The nextToken value returned from a ListAccountSettings request indicating that + // more results are available to fulfill the request and further calls will be + // needed. If maxResults was provided, it's possible the number of results to be + // fewer than maxResults . + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + // The ARN of the principal, which can be a user, role, or the root user. If this + // field is omitted, the account settings are listed only for the authenticated + // user. + // + // Federated users assume the account setting of the root user and can't have + // explicit account settings set for them. + PrincipalArn *string + + // The value of the account settings to filter results with. You must also specify + // an account setting name to use this parameter. + Value *string + + noSmithyDocumentSerde +} + +type ListAccountSettingsOutput struct { + + // The nextToken value to include in a future ListAccountSettings request. When + // the results of a ListAccountSettings request exceed maxResults , this value can + // be used to retrieve the next page of results. This value is null when there are + // no more results to return. + NextToken *string + + // The account settings for the resource. + Settings []types.Setting + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAccountSettingsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListAccountSettings{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListAccountSettings{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccountSettings"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountSettings(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListAccountSettingsPaginatorOptions is the paginator options for +// ListAccountSettings +type ListAccountSettingsPaginatorOptions struct { + // The maximum number of account setting results returned by ListAccountSettings + // in paginated output. When this parameter is used, ListAccountSettings only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListAccountSettings request with the returned nextToken value. This + // value can be between 1 and 10. If this parameter isn't used, then + // ListAccountSettings returns up to 10 results and a nextToken value if + // applicable. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListAccountSettingsPaginator is a paginator for ListAccountSettings +type ListAccountSettingsPaginator struct { + options ListAccountSettingsPaginatorOptions + client ListAccountSettingsAPIClient + params *ListAccountSettingsInput + nextToken *string + firstPage bool +} + +// NewListAccountSettingsPaginator returns a new ListAccountSettingsPaginator +func NewListAccountSettingsPaginator(client ListAccountSettingsAPIClient, params *ListAccountSettingsInput, optFns ...func(*ListAccountSettingsPaginatorOptions)) *ListAccountSettingsPaginator { + if params == nil { + params = &ListAccountSettingsInput{} + } + + options := ListAccountSettingsPaginatorOptions{} + if params.MaxResults != 0 { + options.Limit = params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAccountSettingsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAccountSettingsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0 ) +} + +// NextPage retrieves the next ListAccountSettings page. +func (p *ListAccountSettingsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountSettingsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + params.MaxResults = p.options.Limit + + optFns = append([]func(*Options) { + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListAccountSettings(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListAccountSettingsAPIClient is a client that implements the +// ListAccountSettings operation. +type ListAccountSettingsAPIClient interface { + ListAccountSettings(context.Context, *ListAccountSettingsInput, ...func(*Options)) (*ListAccountSettingsOutput, error) +} + +var _ ListAccountSettingsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListAccountSettings(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAccountSettings", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_ListAttributes.go b/aws-sdk-go-v2/service/ecs/api_op_ListAttributes.go new file mode 100644 index 00000000000..4fbba293088 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_ListAttributes.go @@ -0,0 +1,290 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Lists the attributes for Amazon ECS resources within a specified target type +// and cluster. When you specify a target type and cluster, ListAttributes returns +// a list of attribute objects, one for each attribute on each resource. You can +// filter the list of results to a single attribute name to only return results +// that have that name. You can also filter the results by attribute name and +// value. You can do this, for example, to see which container instances in a +// cluster are running a Linux AMI ( ecs.os-type=linux ). +func (c *Client) ListAttributes(ctx context.Context, params *ListAttributesInput, optFns ...func(*Options)) (*ListAttributesOutput, error) { + if params == nil { params = &ListAttributesInput{} } + + result, metadata, err := c.invokeOperation(ctx, "ListAttributes", params, optFns, c.addOperationListAttributesMiddlewares) + if err != nil { return nil, err } + + out := result.(*ListAttributesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAttributesInput struct { + + // The type of the target to list attributes with. + // + // This member is required. + TargetType types.TargetType + + // The name of the attribute to filter the results with. + AttributeName *string + + // The value of the attribute to filter results with. You must also specify an + // attribute name to use this parameter. + AttributeValue *string + + // The short name or full Amazon Resource Name (ARN) of the cluster to list + // attributes. If you do not specify a cluster, the default cluster is assumed. + Cluster *string + + // The maximum number of cluster results that ListAttributes returned in paginated + // output. When this parameter is used, ListAttributes only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another ListAttributes + // request with the returned nextToken value. This value can be between 1 and 100. 
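// Usage sketch (illustrative only; same assumptions as the earlier sketches):
// driving the ListAccountSettings paginator generated above. The same
// HasMorePages/NextPage loop applies to every List* paginator in this patch
// (ListAttributes, ListClusters, ListContainerInstances, ListServices, ...).
func exampleListAccountSettings(ctx context.Context, client *ecs.Client) error {
	p := ecs.NewListAccountSettingsPaginator(client, &ecs.ListAccountSettingsInput{
		EffectiveSettings: true, // include the defaults that apply to the caller
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, s := range page.Settings {
			fmt.Printf("%s = %s\n", s.Name, aws.ToString(s.Value))
		}
	}
	return nil
}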
+ // If this parameter isn't used, then ListAttributes returns up to 100 results and + // a nextToken value if applicable. + MaxResults *int32 + + // The nextToken value returned from a ListAttributes request indicating that more + // results are available to fulfill the request and further calls are needed. If + // maxResults was provided, it's possible the number of results to be fewer than + // maxResults . + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + noSmithyDocumentSerde +} + +type ListAttributesOutput struct { + + // A list of attribute objects that meet the criteria of the request. + Attributes []types.Attribute + + // The nextToken value to include in a future ListAttributes request. When the + // results of a ListAttributes request exceed maxResults , this value can be used + // to retrieve the next page of results. This value is null when there are no more + // results to return. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListAttributes{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListAttributes{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpListAttributesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAttributes(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = 
addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListAttributesPaginatorOptions is the paginator options for ListAttributes +type ListAttributesPaginatorOptions struct { + // The maximum number of cluster results that ListAttributes returned in paginated + // output. When this parameter is used, ListAttributes only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another ListAttributes + // request with the returned nextToken value. This value can be between 1 and 100. + // If this parameter isn't used, then ListAttributes returns up to 100 results and + // a nextToken value if applicable. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListAttributesPaginator is a paginator for ListAttributes +type ListAttributesPaginator struct { + options ListAttributesPaginatorOptions + client ListAttributesAPIClient + params *ListAttributesInput + nextToken *string + firstPage bool +} + +// NewListAttributesPaginator returns a new ListAttributesPaginator +func NewListAttributesPaginator(client ListAttributesAPIClient, params *ListAttributesInput, optFns ...func(*ListAttributesPaginatorOptions)) *ListAttributesPaginator { + if params == nil { + params = &ListAttributesInput{} + } + + options := ListAttributesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAttributesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAttributesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0 ) +} + +// NextPage retrieves the next ListAttributes page. +func (p *ListAttributesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAttributesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options) { + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListAttributes(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListAttributesAPIClient is a client that implements the ListAttributes +// operation. 
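// Usage sketch (illustrative only; same assumptions as the earlier sketches):
// filtering container-instance attributes by name and value, mirroring the
// ecs.os-type=linux example from the ListAttributes documentation above.
func exampleListAttributes(ctx context.Context, client *ecs.Client) error {
	out, err := client.ListAttributes(ctx, &ecs.ListAttributesInput{
		Cluster:        aws.String("my-cluster"),
		TargetType:     types.TargetTypeContainerInstance, // required
		AttributeName:  aws.String("ecs.os-type"),
		AttributeValue: aws.String("linux"),
	})
	if err != nil {
		return err
	}
	for _, a := range out.Attributes {
		fmt.Printf("%s: %s=%s\n", aws.ToString(a.TargetId), aws.ToString(a.Name), aws.ToString(a.Value))
	}
	return nil
}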
+type ListAttributesAPIClient interface { + ListAttributes(context.Context, *ListAttributesInput, ...func(*Options)) (*ListAttributesOutput, error) +} + +var _ ListAttributesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListAttributes(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAttributes", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_ListClusters.go b/aws-sdk-go-v2/service/ecs/api_op_ListClusters.go new file mode 100644 index 00000000000..b9ff4fa795a --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_ListClusters.go @@ -0,0 +1,264 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of existing clusters. +func (c *Client) ListClusters(ctx context.Context, params *ListClustersInput, optFns ...func(*Options)) (*ListClustersOutput, error) { + if params == nil { params = &ListClustersInput{} } + + result, metadata, err := c.invokeOperation(ctx, "ListClusters", params, optFns, c.addOperationListClustersMiddlewares) + if err != nil { return nil, err } + + out := result.(*ListClustersOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListClustersInput struct { + + // The maximum number of cluster results that ListClusters returned in paginated + // output. When this parameter is used, ListClusters only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another ListClusters + // request with the returned nextToken value. This value can be between 1 and 100. + // If this parameter isn't used, then ListClusters returns up to 100 results and a + // nextToken value if applicable. + MaxResults *int32 + + // The nextToken value returned from a ListClusters request indicating that more + // results are available to fulfill the request and further calls are needed. If + // maxResults was provided, it's possible the number of results to be fewer than + // maxResults . + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + noSmithyDocumentSerde +} + +type ListClustersOutput struct { + + // The list of full Amazon Resource Name (ARN) entries for each cluster that's + // associated with your account. + ClusterArns []string + + // The nextToken value to include in a future ListClusters request. When the + // results of a ListClusters request exceed maxResults , this value can be used to + // retrieve the next page of results. This value is null when there are no more + // results to return. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListClustersMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListClusters{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListClusters{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListClusters"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListClusters(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListClustersPaginatorOptions is the paginator options for ListClusters +type ListClustersPaginatorOptions struct { + // The maximum number of cluster results that ListClusters returned in paginated + // output. When this parameter is used, ListClusters only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another ListClusters + // request with the returned nextToken value. This value can be between 1 and 100. + // If this parameter isn't used, then ListClusters returns up to 100 results and a + // nextToken value if applicable. 
+ Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListClustersPaginator is a paginator for ListClusters +type ListClustersPaginator struct { + options ListClustersPaginatorOptions + client ListClustersAPIClient + params *ListClustersInput + nextToken *string + firstPage bool +} + +// NewListClustersPaginator returns a new ListClustersPaginator +func NewListClustersPaginator(client ListClustersAPIClient, params *ListClustersInput, optFns ...func(*ListClustersPaginatorOptions)) *ListClustersPaginator { + if params == nil { + params = &ListClustersInput{} + } + + options := ListClustersPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListClustersPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListClustersPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0 ) +} + +// NextPage retrieves the next ListClusters page. +func (p *ListClustersPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListClustersOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options) { + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListClusters(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListClustersAPIClient is a client that implements the ListClusters operation. +type ListClustersAPIClient interface { + ListClusters(context.Context, *ListClustersInput, ...func(*Options)) (*ListClustersOutput, error) +} + +var _ ListClustersAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListClusters(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListClusters", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_ListContainerInstances.go b/aws-sdk-go-v2/service/ecs/api_op_ListContainerInstances.go new file mode 100644 index 00000000000..f7dd30b86f1 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_ListContainerInstances.go @@ -0,0 +1,294 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Returns a list of container instances in a specified cluster. You can filter +// the results of a ListContainerInstances operation with cluster query language +// statements inside the filter parameter. For more information, see [Cluster Query Language] in the +// Amazon Elastic Container Service Developer Guide. 
+// +// [Cluster Query Language]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html +func (c *Client) ListContainerInstances(ctx context.Context, params *ListContainerInstancesInput, optFns ...func(*Options)) (*ListContainerInstancesOutput, error) { + if params == nil { params = &ListContainerInstancesInput{} } + + result, metadata, err := c.invokeOperation(ctx, "ListContainerInstances", params, optFns, c.addOperationListContainerInstancesMiddlewares) + if err != nil { return nil, err } + + out := result.(*ListContainerInstancesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListContainerInstancesInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // container instances to list. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string + + // You can filter the results of a ListContainerInstances operation with cluster + // query language statements. For more information, see [Cluster Query Language]in the Amazon Elastic + // Container Service Developer Guide. + // + // [Cluster Query Language]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html + Filter *string + + // The maximum number of container instance results that ListContainerInstances + // returned in paginated output. When this parameter is used, + // ListContainerInstances only returns maxResults results in a single page along + // with a nextToken response element. The remaining results of the initial request + // can be seen by sending another ListContainerInstances request with the returned + // nextToken value. This value can be between 1 and 100. If this parameter isn't + // used, then ListContainerInstances returns up to 100 results and a nextToken + // value if applicable. + MaxResults *int32 + + // The nextToken value returned from a ListContainerInstances request indicating + // that more results are available to fulfill the request and further calls are + // needed. If maxResults was provided, it's possible the number of results to be + // fewer than maxResults . + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + // Filters the container instances by status. For example, if you specify the + // DRAINING status, the results include only container instances that have been set + // to DRAINING using [UpdateContainerInstancesState]. If you don't specify this parameter, the default is to + // include container instances set to all states other than INACTIVE . + // + // [UpdateContainerInstancesState]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateContainerInstancesState.html + Status types.ContainerInstanceStatus + + noSmithyDocumentSerde +} + +type ListContainerInstancesOutput struct { + + // The list of container instances with full ARN entries for each container + // instance associated with the specified cluster. + ContainerInstanceArns []string + + // The nextToken value to include in a future ListContainerInstances request. When + // the results of a ListContainerInstances request exceed maxResults , this value + // can be used to retrieve the next page of results. This value is null when there + // are no more results to return. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListContainerInstancesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListContainerInstances{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListContainerInstances{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListContainerInstances"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListContainerInstances(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListContainerInstancesPaginatorOptions is the paginator options for +// ListContainerInstances +type ListContainerInstancesPaginatorOptions struct { + // The maximum number of container instance results that ListContainerInstances + // returned in paginated output. When this parameter is used, + // ListContainerInstances only returns maxResults results in a single page along + // with a nextToken response element. The remaining results of the initial request + // can be seen by sending another ListContainerInstances request with the returned + // nextToken value. This value can be between 1 and 100. 
If this parameter isn't + // used, then ListContainerInstances returns up to 100 results and a nextToken + // value if applicable. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListContainerInstancesPaginator is a paginator for ListContainerInstances +type ListContainerInstancesPaginator struct { + options ListContainerInstancesPaginatorOptions + client ListContainerInstancesAPIClient + params *ListContainerInstancesInput + nextToken *string + firstPage bool +} + +// NewListContainerInstancesPaginator returns a new ListContainerInstancesPaginator +func NewListContainerInstancesPaginator(client ListContainerInstancesAPIClient, params *ListContainerInstancesInput, optFns ...func(*ListContainerInstancesPaginatorOptions)) *ListContainerInstancesPaginator { + if params == nil { + params = &ListContainerInstancesInput{} + } + + options := ListContainerInstancesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListContainerInstancesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListContainerInstancesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0 ) +} + +// NextPage retrieves the next ListContainerInstances page. +func (p *ListContainerInstancesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListContainerInstancesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options) { + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListContainerInstances(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListContainerInstancesAPIClient is a client that implements the +// ListContainerInstances operation. +type ListContainerInstancesAPIClient interface { + ListContainerInstances(context.Context, *ListContainerInstancesInput, ...func(*Options)) (*ListContainerInstancesOutput, error) +} + +var _ ListContainerInstancesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListContainerInstances(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListContainerInstances", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_ListServices.go b/aws-sdk-go-v2/service/ecs/api_op_ListServices.go new file mode 100644 index 00000000000..b93491f2f8e --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_ListServices.go @@ -0,0 +1,277 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
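// Usage sketch (illustrative only; same assumptions as the earlier sketches):
// paginating the DRAINING container instances that match a cluster query
// language filter. The filter expression is just an example.
func exampleListDrainingInstances(ctx context.Context, client *ecs.Client) ([]string, error) {
	var arns []string
	p := ecs.NewListContainerInstancesPaginator(client, &ecs.ListContainerInstancesInput{
		Cluster: aws.String("my-cluster"),
		Filter:  aws.String("attribute:ecs.instance-type =~ t3.*"),
		Status:  types.ContainerInstanceStatusDraining,
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		arns = append(arns, page.ContainerInstanceArns...)
	}
	return arns, nil
}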
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Returns a list of services. You can filter the results by cluster, launch type, +// and scheduling strategy. +func (c *Client) ListServices(ctx context.Context, params *ListServicesInput, optFns ...func(*Options)) (*ListServicesOutput, error) { + if params == nil { params = &ListServicesInput{} } + + result, metadata, err := c.invokeOperation(ctx, "ListServices", params, optFns, c.addOperationListServicesMiddlewares) + if err != nil { return nil, err } + + out := result.(*ListServicesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListServicesInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster to use when + // filtering the ListServices results. If you do not specify a cluster, the + // default cluster is assumed. + Cluster *string + + // The launch type to use when filtering the ListServices results. + LaunchType types.LaunchType + + // The maximum number of service results that ListServices returned in paginated + // output. When this parameter is used, ListServices only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another ListServices + // request with the returned nextToken value. This value can be between 1 and 100. + // If this parameter isn't used, then ListServices returns up to 10 results and a + // nextToken value if applicable. + MaxResults *int32 + + // The nextToken value returned from a ListServices request indicating that more + // results are available to fulfill the request and further calls will be needed. + // If maxResults was provided, it is possible the number of results to be fewer + // than maxResults . + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + // The scheduling strategy to use when filtering the ListServices results. + SchedulingStrategy types.SchedulingStrategy + + noSmithyDocumentSerde +} + +type ListServicesOutput struct { + + // The nextToken value to include in a future ListServices request. When the + // results of a ListServices request exceed maxResults , this value can be used to + // retrieve the next page of results. This value is null when there are no more + // results to return. + NextToken *string + + // The list of full ARN entries for each service that's associated with the + // specified cluster. + ServiceArns []string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListServicesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListServices{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListServices{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListServices"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListServices(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListServicesPaginatorOptions is the paginator options for ListServices +type ListServicesPaginatorOptions struct { + // The maximum number of service results that ListServices returned in paginated + // output. When this parameter is used, ListServices only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another ListServices + // request with the returned nextToken value. This value can be between 1 and 100. + // If this parameter isn't used, then ListServices returns up to 10 results and a + // nextToken value if applicable. 
+ Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListServicesPaginator is a paginator for ListServices +type ListServicesPaginator struct { + options ListServicesPaginatorOptions + client ListServicesAPIClient + params *ListServicesInput + nextToken *string + firstPage bool +} + +// NewListServicesPaginator returns a new ListServicesPaginator +func NewListServicesPaginator(client ListServicesAPIClient, params *ListServicesInput, optFns ...func(*ListServicesPaginatorOptions)) *ListServicesPaginator { + if params == nil { + params = &ListServicesInput{} + } + + options := ListServicesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListServicesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListServicesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0 ) +} + +// NextPage retrieves the next ListServices page. +func (p *ListServicesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListServicesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options) { + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListServices(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListServicesAPIClient is a client that implements the ListServices operation. +type ListServicesAPIClient interface { + ListServices(context.Context, *ListServicesInput, ...func(*Options)) (*ListServicesOutput, error) +} + +var _ ListServicesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListServices(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListServices", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_ListServicesByNamespace.go b/aws-sdk-go-v2/service/ecs/api_op_ListServicesByNamespace.go new file mode 100644 index 00000000000..7013e375759 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_ListServicesByNamespace.go @@ -0,0 +1,291 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation lists all of the services that are associated with a Cloud Map +// namespace. This list might include services in different clusters. In contrast, +// ListServices can only list services in one cluster at a time. If you need to +// filter the list of services in a single cluster by various parameters, use +// ListServices . 
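// Usage sketch (illustrative only; same assumptions as the earlier sketches):
// listing only the Fargate services in one cluster via the generated
// ListServices paginator.
func exampleListFargateServices(ctx context.Context, client *ecs.Client) ([]string, error) {
	var serviceARNs []string
	p := ecs.NewListServicesPaginator(client, &ecs.ListServicesInput{
		Cluster:    aws.String("my-cluster"),
		LaunchType: types.LaunchTypeFargate,
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		serviceARNs = append(serviceARNs, page.ServiceArns...)
	}
	return serviceARNs, nil
}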
For more information, see [Service Connect] in the Amazon Elastic Container +// Service Developer Guide. +// +// [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html +func (c *Client) ListServicesByNamespace(ctx context.Context, params *ListServicesByNamespaceInput, optFns ...func(*Options)) (*ListServicesByNamespaceOutput, error) { + if params == nil { params = &ListServicesByNamespaceInput{} } + + result, metadata, err := c.invokeOperation(ctx, "ListServicesByNamespace", params, optFns, c.addOperationListServicesByNamespaceMiddlewares) + if err != nil { return nil, err } + + out := result.(*ListServicesByNamespaceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListServicesByNamespaceInput struct { + + // The namespace name or full Amazon Resource Name (ARN) of the Cloud Map + // namespace to list the services in. + // + // Tasks that run in a namespace can use short names to connect to services in the + // namespace. Tasks can connect to services across all of the clusters in the + // namespace. Tasks connect through a managed proxy container that collects logs + // and metrics for increased visibility. Only the tasks that Amazon ECS services + // create are supported with Service Connect. For more information, see [Service Connect]in the + // Amazon Elastic Container Service Developer Guide. + // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + // + // This member is required. + Namespace *string + + // The maximum number of service results that ListServicesByNamespace returns in + // paginated output. When this parameter is used, ListServicesByNamespace only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListServicesByNamespace request with the returned nextToken value. This + // value can be between 1 and 100. If this parameter isn't used, then + // ListServicesByNamespace returns up to 10 results and a nextToken value if + // applicable. + MaxResults *int32 + + // The nextToken value that's returned from a ListServicesByNamespace request. It + // indicates that more results are available to fulfill the request and further + // calls are needed. If maxResults is returned, it is possible the number of + // results is less than maxResults . + NextToken *string + + noSmithyDocumentSerde +} + +type ListServicesByNamespaceOutput struct { + + // The nextToken value to include in a future ListServicesByNamespace request. + // When the results of a ListServicesByNamespace request exceed maxResults , this + // value can be used to retrieve the next page of results. When there are no more + // results to return, this value is null . + NextToken *string + + // The list of full ARN entries for each service that's associated with the + // specified namespace. + ServiceArns []string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListServicesByNamespaceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListServicesByNamespace{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListServicesByNamespace{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListServicesByNamespace"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpListServicesByNamespaceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListServicesByNamespace(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListServicesByNamespacePaginatorOptions is the paginator options for +// ListServicesByNamespace +type ListServicesByNamespacePaginatorOptions struct { + // The maximum number of service results that ListServicesByNamespace returns in + // paginated output. When this parameter is used, ListServicesByNamespace only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListServicesByNamespace request with the returned nextToken value. 
This + // value can be between 1 and 100. If this parameter isn't used, then + // ListServicesByNamespace returns up to 10 results and a nextToken value if + // applicable. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListServicesByNamespacePaginator is a paginator for ListServicesByNamespace +type ListServicesByNamespacePaginator struct { + options ListServicesByNamespacePaginatorOptions + client ListServicesByNamespaceAPIClient + params *ListServicesByNamespaceInput + nextToken *string + firstPage bool +} + +// NewListServicesByNamespacePaginator returns a new +// ListServicesByNamespacePaginator +func NewListServicesByNamespacePaginator(client ListServicesByNamespaceAPIClient, params *ListServicesByNamespaceInput, optFns ...func(*ListServicesByNamespacePaginatorOptions)) *ListServicesByNamespacePaginator { + if params == nil { + params = &ListServicesByNamespaceInput{} + } + + options := ListServicesByNamespacePaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListServicesByNamespacePaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListServicesByNamespacePaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0 ) +} + +// NextPage retrieves the next ListServicesByNamespace page. +func (p *ListServicesByNamespacePaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListServicesByNamespaceOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options) { + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListServicesByNamespace(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListServicesByNamespaceAPIClient is a client that implements the +// ListServicesByNamespace operation. +type ListServicesByNamespaceAPIClient interface { + ListServicesByNamespace(context.Context, *ListServicesByNamespaceInput, ...func(*Options)) (*ListServicesByNamespaceOutput, error) +} + +var _ ListServicesByNamespaceAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListServicesByNamespace(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListServicesByNamespace", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_ListTagsForResource.go b/aws-sdk-go-v2/service/ecs/api_op_ListTagsForResource.go new file mode 100644 index 00000000000..c531f00a563 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_ListTagsForResource.go @@ -0,0 +1,152 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
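+
+// A minimal usage sketch for ListTagsForResource, assuming the standard
+// aws-sdk-go-v2 aws and config helper modules; the region, account ID, and
+// cluster name in the ARN are placeholders:
+//
+//	package main
+//
+//	import (
+//		"context"
+//		"fmt"
+//		"log"
+//
+//		"github.com/aws/aws-sdk-go-v2/aws"
+//		"github.com/aws/aws-sdk-go-v2/config"
+//		"github.com/aws/aws-sdk-go-v2/service/ecs"
+//	)
+//
+//	func main() {
+//		// Load credentials and region from the default sources.
+//		cfg, err := config.LoadDefaultConfig(context.TODO())
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		client := ecs.NewFromConfig(cfg)
+//
+//		// List the tags attached to a cluster (placeholder ARN).
+//		out, err := client.ListTagsForResource(context.TODO(), &ecs.ListTagsForResourceInput{
+//			ResourceArn: aws.String("arn:aws:ecs:us-east-1:111122223333:cluster/example"),
+//		})
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, tag := range out.Tags {
+//			fmt.Println(aws.ToString(tag.Key), aws.ToString(tag.Value))
+//		}
+//	}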
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// List the tags for an Amazon ECS resource. +func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { + if params == nil { params = &ListTagsForResourceInput{} } + + result, metadata, err := c.invokeOperation(ctx, "ListTagsForResource", params, optFns, c.addOperationListTagsForResourceMiddlewares) + if err != nil { return nil, err } + + out := result.(*ListTagsForResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTagsForResourceInput struct { + + // The Amazon Resource Name (ARN) that identifies the resource to list the tags + // for. Currently, the supported resources are Amazon ECS tasks, services, task + // definitions, clusters, and container instances. + // + // This member is required. + ResourceArn *string + + noSmithyDocumentSerde +} + +type ListTagsForResourceOutput struct { + + // The tags for the resource. + Tags []types.Tag + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListTagsForResource{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListTagsForResource{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTagsForResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForResource(options.Region, ), middleware.Before); err != nil { + return err + } + if err = 
addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListTagsForResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListTagsForResource", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitionFamilies.go b/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitionFamilies.go new file mode 100644 index 00000000000..4cffdde6510 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitionFamilies.go @@ -0,0 +1,291 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Returns a list of task definition families that are registered to your account. +// This list includes task definition families that no longer have any ACTIVE task +// definition revisions. +// +// You can filter out task definition families that don't contain any ACTIVE task +// definition revisions by setting the status parameter to ACTIVE . You can also +// filter the results with the familyPrefix parameter. +func (c *Client) ListTaskDefinitionFamilies(ctx context.Context, params *ListTaskDefinitionFamiliesInput, optFns ...func(*Options)) (*ListTaskDefinitionFamiliesOutput, error) { + if params == nil { params = &ListTaskDefinitionFamiliesInput{} } + + result, metadata, err := c.invokeOperation(ctx, "ListTaskDefinitionFamilies", params, optFns, c.addOperationListTaskDefinitionFamiliesMiddlewares) + if err != nil { return nil, err } + + out := result.(*ListTaskDefinitionFamiliesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTaskDefinitionFamiliesInput struct { + + // The familyPrefix is a string that's used to filter the results of + // ListTaskDefinitionFamilies . If you specify a familyPrefix , only task + // definition family names that begin with the familyPrefix string are returned. + FamilyPrefix *string + + // The maximum number of task definition family results that + // ListTaskDefinitionFamilies returned in paginated output. When this parameter is + // used, ListTaskDefinitions only returns maxResults results in a single page + // along with a nextToken response element. The remaining results of the initial + // request can be seen by sending another ListTaskDefinitionFamilies request with + // the returned nextToken value. This value can be between 1 and 100. If this + // parameter isn't used, then ListTaskDefinitionFamilies returns up to 100 results + // and a nextToken value if applicable. 
+ MaxResults *int32 + + // The nextToken value returned from a ListTaskDefinitionFamilies request + // indicating that more results are available to fulfill the request and further + // calls will be needed. If maxResults was provided, it is possible the number of + // results to be fewer than maxResults . + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + // The task definition family status to filter the ListTaskDefinitionFamilies + // results with. By default, both ACTIVE and INACTIVE task definition families are + // listed. If this parameter is set to ACTIVE , only task definition families that + // have an ACTIVE task definition revision are returned. If this parameter is set + // to INACTIVE , only task definition families that do not have any ACTIVE task + // definition revisions are returned. If you paginate the resulting output, be sure + // to keep the status value constant in each subsequent request. + Status types.TaskDefinitionFamilyStatus + + noSmithyDocumentSerde +} + +type ListTaskDefinitionFamiliesOutput struct { + + // The list of task definition family names that match the + // ListTaskDefinitionFamilies request. + Families []string + + // The nextToken value to include in a future ListTaskDefinitionFamilies request. + // When the results of a ListTaskDefinitionFamilies request exceed maxResults , + // this value can be used to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTaskDefinitionFamiliesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListTaskDefinitionFamilies{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListTaskDefinitionFamilies{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTaskDefinitionFamilies"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = 
addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTaskDefinitionFamilies(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListTaskDefinitionFamiliesPaginatorOptions is the paginator options for +// ListTaskDefinitionFamilies +type ListTaskDefinitionFamiliesPaginatorOptions struct { + // The maximum number of task definition family results that + // ListTaskDefinitionFamilies returned in paginated output. When this parameter is + // used, ListTaskDefinitions only returns maxResults results in a single page + // along with a nextToken response element. The remaining results of the initial + // request can be seen by sending another ListTaskDefinitionFamilies request with + // the returned nextToken value. This value can be between 1 and 100. If this + // parameter isn't used, then ListTaskDefinitionFamilies returns up to 100 results + // and a nextToken value if applicable. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListTaskDefinitionFamiliesPaginator is a paginator for +// ListTaskDefinitionFamilies +type ListTaskDefinitionFamiliesPaginator struct { + options ListTaskDefinitionFamiliesPaginatorOptions + client ListTaskDefinitionFamiliesAPIClient + params *ListTaskDefinitionFamiliesInput + nextToken *string + firstPage bool +} + +// NewListTaskDefinitionFamiliesPaginator returns a new +// ListTaskDefinitionFamiliesPaginator +func NewListTaskDefinitionFamiliesPaginator(client ListTaskDefinitionFamiliesAPIClient, params *ListTaskDefinitionFamiliesInput, optFns ...func(*ListTaskDefinitionFamiliesPaginatorOptions)) *ListTaskDefinitionFamiliesPaginator { + if params == nil { + params = &ListTaskDefinitionFamiliesInput{} + } + + options := ListTaskDefinitionFamiliesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListTaskDefinitionFamiliesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListTaskDefinitionFamiliesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0 ) +} + +// NextPage retrieves the next ListTaskDefinitionFamilies page. 
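+//
+// A typical pagination loop, shown as a sketch only; it assumes client
+// satisfies ListTaskDefinitionFamiliesAPIClient and that the context, fmt, and
+// log packages are imported by the caller:
+//
+//	p := ecs.NewListTaskDefinitionFamiliesPaginator(client, &ecs.ListTaskDefinitionFamiliesInput{})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(context.TODO())
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, family := range page.Families {
+//			fmt.Println(family)
+//		}
+//	}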
+func (p *ListTaskDefinitionFamiliesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTaskDefinitionFamiliesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options) { + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListTaskDefinitionFamilies(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListTaskDefinitionFamiliesAPIClient is a client that implements the +// ListTaskDefinitionFamilies operation. +type ListTaskDefinitionFamiliesAPIClient interface { + ListTaskDefinitionFamilies(context.Context, *ListTaskDefinitionFamiliesInput, ...func(*Options)) (*ListTaskDefinitionFamiliesOutput, error) +} + +var _ ListTaskDefinitionFamiliesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListTaskDefinitionFamilies(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListTaskDefinitionFamilies", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitions.go b/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitions.go new file mode 100644 index 00000000000..3be09d9f331 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitions.go @@ -0,0 +1,291 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Returns a list of task definitions that are registered to your account. You can +// filter the results by family name with the familyPrefix parameter or by status +// with the status parameter. +func (c *Client) ListTaskDefinitions(ctx context.Context, params *ListTaskDefinitionsInput, optFns ...func(*Options)) (*ListTaskDefinitionsOutput, error) { + if params == nil { params = &ListTaskDefinitionsInput{} } + + result, metadata, err := c.invokeOperation(ctx, "ListTaskDefinitions", params, optFns, c.addOperationListTaskDefinitionsMiddlewares) + if err != nil { return nil, err } + + out := result.(*ListTaskDefinitionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTaskDefinitionsInput struct { + + // The full family name to filter the ListTaskDefinitions results with. Specifying + // a familyPrefix limits the listed task definitions to task definition revisions + // that belong to that family. + FamilyPrefix *string + + // The maximum number of task definition results that ListTaskDefinitions returned + // in paginated output. When this parameter is used, ListTaskDefinitions only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListTaskDefinitions request with the returned nextToken value. This + // value can be between 1 and 100. 
If this parameter isn't used, then + // ListTaskDefinitions returns up to 100 results and a nextToken value if + // applicable. + MaxResults *int32 + + // The nextToken value returned from a ListTaskDefinitions request indicating that + // more results are available to fulfill the request and further calls will be + // needed. If maxResults was provided, it is possible the number of results to be + // fewer than maxResults . + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + // The order to sort the results in. Valid values are ASC and DESC . By default, ( + // ASC ) task definitions are listed lexicographically by family name and in + // ascending numerical order by revision so that the newest task definitions in a + // family are listed last. Setting this parameter to DESC reverses the sort order + // on family name and revision. This is so that the newest task definitions in a + // family are listed first. + Sort types.SortOrder + + // The task definition status to filter the ListTaskDefinitions results with. By + // default, only ACTIVE task definitions are listed. By setting this parameter to + // INACTIVE , you can view task definitions that are INACTIVE as long as an active + // task or service still references them. If you paginate the resulting output, be + // sure to keep the status value constant in each subsequent request. + Status types.TaskDefinitionStatus + + noSmithyDocumentSerde +} + +type ListTaskDefinitionsOutput struct { + + // The nextToken value to include in a future ListTaskDefinitions request. When + // the results of a ListTaskDefinitions request exceed maxResults , this value can + // be used to retrieve the next page of results. This value is null when there are + // no more results to return. + NextToken *string + + // The list of task definition Amazon Resource Name (ARN) entries for the + // ListTaskDefinitions request. + TaskDefinitionArns []string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTaskDefinitionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListTaskDefinitions{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListTaskDefinitions{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTaskDefinitions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTaskDefinitions(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListTaskDefinitionsPaginatorOptions is the paginator options for +// ListTaskDefinitions +type ListTaskDefinitionsPaginatorOptions struct { + // The maximum number of task definition results that ListTaskDefinitions returned + // in paginated output. When this parameter is used, ListTaskDefinitions only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListTaskDefinitions request with the returned nextToken value. This + // value can be between 1 and 100. If this parameter isn't used, then + // ListTaskDefinitions returns up to 100 results and a nextToken value if + // applicable. 
+ Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListTaskDefinitionsPaginator is a paginator for ListTaskDefinitions +type ListTaskDefinitionsPaginator struct { + options ListTaskDefinitionsPaginatorOptions + client ListTaskDefinitionsAPIClient + params *ListTaskDefinitionsInput + nextToken *string + firstPage bool +} + +// NewListTaskDefinitionsPaginator returns a new ListTaskDefinitionsPaginator +func NewListTaskDefinitionsPaginator(client ListTaskDefinitionsAPIClient, params *ListTaskDefinitionsInput, optFns ...func(*ListTaskDefinitionsPaginatorOptions)) *ListTaskDefinitionsPaginator { + if params == nil { + params = &ListTaskDefinitionsInput{} + } + + options := ListTaskDefinitionsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListTaskDefinitionsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListTaskDefinitionsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0 ) +} + +// NextPage retrieves the next ListTaskDefinitions page. +func (p *ListTaskDefinitionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTaskDefinitionsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options) { + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListTaskDefinitions(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListTaskDefinitionsAPIClient is a client that implements the +// ListTaskDefinitions operation. +type ListTaskDefinitionsAPIClient interface { + ListTaskDefinitions(context.Context, *ListTaskDefinitionsInput, ...func(*Options)) (*ListTaskDefinitionsOutput, error) +} + +var _ ListTaskDefinitionsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListTaskDefinitions(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListTaskDefinitions", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_ListTasks.go b/aws-sdk-go-v2/service/ecs/api_op_ListTasks.go new file mode 100644 index 00000000000..22f04400475 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_ListTasks.go @@ -0,0 +1,309 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Returns a list of tasks. 
You can filter the results by cluster, task definition +// family, container instance, launch type, what IAM principal started the task, or +// by the desired status of the task. +// +// Recently stopped tasks might appear in the returned results. +func (c *Client) ListTasks(ctx context.Context, params *ListTasksInput, optFns ...func(*Options)) (*ListTasksOutput, error) { + if params == nil { params = &ListTasksInput{} } + + result, metadata, err := c.invokeOperation(ctx, "ListTasks", params, optFns, c.addOperationListTasksMiddlewares) + if err != nil { return nil, err } + + out := result.(*ListTasksOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTasksInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster to use when + // filtering the ListTasks results. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string + + // The container instance ID or full ARN of the container instance to use when + // filtering the ListTasks results. Specifying a containerInstance limits the + // results to tasks that belong to that container instance. + ContainerInstance *string + + // The task desired status to use when filtering the ListTasks results. Specifying + // a desiredStatus of STOPPED limits the results to tasks that Amazon ECS has set + // the desired status to STOPPED . This can be useful for debugging tasks that + // aren't starting properly or have died or finished. The default status filter is + // RUNNING , which shows tasks that Amazon ECS has set the desired status to + // RUNNING . + // + // Although you can filter results based on a desired status of PENDING , this + // doesn't return any results. Amazon ECS never sets the desired status of a task + // to that value (only a task's lastStatus may have a value of PENDING ). + DesiredStatus types.DesiredStatus + + // The name of the task definition family to use when filtering the ListTasks + // results. Specifying a family limits the results to tasks that belong to that + // family. + Family *string + + // The launch type to use when filtering the ListTasks results. + LaunchType types.LaunchType + + // The maximum number of task results that ListTasks returned in paginated output. + // When this parameter is used, ListTasks only returns maxResults results in a + // single page along with a nextToken response element. The remaining results of + // the initial request can be seen by sending another ListTasks request with the + // returned nextToken value. This value can be between 1 and 100. If this + // parameter isn't used, then ListTasks returns up to 100 results and a nextToken + // value if applicable. + MaxResults *int32 + + // The nextToken value returned from a ListTasks request indicating that more + // results are available to fulfill the request and further calls will be needed. + // If maxResults was provided, it's possible the number of results to be fewer + // than maxResults . + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + // The name of the service to use when filtering the ListTasks results. Specifying + // a serviceName limits the results to tasks that belong to that service. + ServiceName *string + + // The startedBy value to filter the task results with. Specifying a startedBy + // value limits the results to tasks that were started with that value. 
+ // + // When you specify startedBy as the filter, it must be the only filter that you + // use. + StartedBy *string + + noSmithyDocumentSerde +} + +type ListTasksOutput struct { + + // The nextToken value to include in a future ListTasks request. When the results + // of a ListTasks request exceed maxResults , this value can be used to retrieve + // the next page of results. This value is null when there are no more results to + // return. + NextToken *string + + // The list of task ARN entries for the ListTasks request. + TaskArns []string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTasksMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListTasks{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListTasks{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTasks"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTasks(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListTasksPaginatorOptions is the paginator options for ListTasks +type ListTasksPaginatorOptions struct { + // The maximum number of task results that ListTasks 
returned in paginated output. + // When this parameter is used, ListTasks only returns maxResults results in a + // single page along with a nextToken response element. The remaining results of + // the initial request can be seen by sending another ListTasks request with the + // returned nextToken value. This value can be between 1 and 100. If this + // parameter isn't used, then ListTasks returns up to 100 results and a nextToken + // value if applicable. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListTasksPaginator is a paginator for ListTasks +type ListTasksPaginator struct { + options ListTasksPaginatorOptions + client ListTasksAPIClient + params *ListTasksInput + nextToken *string + firstPage bool +} + +// NewListTasksPaginator returns a new ListTasksPaginator +func NewListTasksPaginator(client ListTasksAPIClient, params *ListTasksInput, optFns ...func(*ListTasksPaginatorOptions)) *ListTasksPaginator { + if params == nil { + params = &ListTasksInput{} + } + + options := ListTasksPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListTasksPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListTasksPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0 ) +} + +// NextPage retrieves the next ListTasks page. +func (p *ListTasksPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTasksOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options) { + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListTasks(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListTasksAPIClient is a client that implements the ListTasks operation. +type ListTasksAPIClient interface { + ListTasks(context.Context, *ListTasksInput, ...func(*Options)) (*ListTasksOutput, error) +} + +var _ ListTasksAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListTasks(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListTasks", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_PutAccountSetting.go b/aws-sdk-go-v2/service/ecs/api_op_PutAccountSetting.go new file mode 100644 index 00000000000..9487180bbf8 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_PutAccountSetting.go @@ -0,0 +1,258 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
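+
+// A minimal usage sketch for PutAccountSetting, assuming the context, fmt, log,
+// aws, config, ecs, and ecs/types imports used elsewhere in this SDK; the
+// setting name and value below are illustrative only:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client := ecs.NewFromConfig(cfg)
+//
+//	// Turn on Container Insights for the calling identity.
+//	out, err := client.PutAccountSetting(context.TODO(), &ecs.PutAccountSettingInput{
+//		Name:  types.SettingNameContainerInsights,
+//		Value: aws.String("enabled"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(string(out.Setting.Name), aws.ToString(out.Setting.Value))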
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Modifies an account setting. Account settings are set on a per-Region basis. +// +// If you change the root user account setting, the default settings are reset for +// users and roles that do not have specified individual account settings. For more +// information, see [Account Settings]in the Amazon Elastic Container Service Developer Guide. +// +// [Account Settings]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html +func (c *Client) PutAccountSetting(ctx context.Context, params *PutAccountSettingInput, optFns ...func(*Options)) (*PutAccountSettingOutput, error) { + if params == nil { params = &PutAccountSettingInput{} } + + result, metadata, err := c.invokeOperation(ctx, "PutAccountSetting", params, optFns, c.addOperationPutAccountSettingMiddlewares) + if err != nil { return nil, err } + + out := result.(*PutAccountSettingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutAccountSettingInput struct { + + // The Amazon ECS account setting name to modify. + // + // The following are the valid values for the account setting name. + // + // - serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and + // resource ID format of the resource type for a specified user, role, or the root + // user for an account is affected. The opt-in and opt-out account setting must be + // set for each Amazon ECS resource separately. The ARN and resource ID format of a + // resource is defined by the opt-in status of the user or role that created the + // resource. You must turn on this setting to use Amazon ECS features such as + // resource tagging. + // + // - taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and + // resource ID format of the resource type for a specified user, role, or the root + // user for an account is affected. The opt-in and opt-out account setting must be + // set for each Amazon ECS resource separately. The ARN and resource ID format of a + // resource is defined by the opt-in status of the user or role that created the + // resource. You must turn on this setting to use Amazon ECS features such as + // resource tagging. + // + // - containerInstanceLongArnFormat - When modified, the Amazon Resource Name + // (ARN) and resource ID format of the resource type for a specified user, role, or + // the root user for an account is affected. The opt-in and opt-out account setting + // must be set for each Amazon ECS resource separately. The ARN and resource ID + // format of a resource is defined by the opt-in status of the user or role that + // created the resource. You must turn on this setting to use Amazon ECS features + // such as resource tagging. + // + // - awsvpcTrunking - When modified, the elastic network interface (ENI) limit + // for any new container instances that support the feature is changed. If + // awsvpcTrunking is turned on, any new container instances that support the + // feature are launched have the increased ENI limits available to them. For more + // information, see [Elastic Network Interface Trunking]in the Amazon Elastic Container Service Developer Guide. 
+ // + // - containerInsights - When modified, the default setting indicating whether + // Amazon Web Services CloudWatch Container Insights is turned on for your clusters + // is changed. If containerInsights is turned on, any new clusters that are + // created will have Container Insights turned on unless you disable it during + // cluster creation. For more information, see [CloudWatch Container Insights]in the Amazon Elastic Container + // Service Developer Guide. + // + // - dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your + // tasks using the awsvpc network mode can have an IPv6 address assigned. For + // more information on using IPv6 with tasks launched on Amazon EC2 instances, see [Using a VPC in dual-stack mode] + // . For more information on using IPv6 with tasks launched on Fargate, see [Using a VPC in dual-stack mode]. + // + // - fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a + // security or infrastructure update is needed for an Amazon ECS task hosted on + // Fargate, the tasks need to be stopped and new tasks launched to replace them. + // Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a + // Fargate task. For information about the Fargate tasks maintenance, see [Amazon Web Services Fargate task maintenance]in the + // Amazon ECS Developer Guide. + // + // - tagResourceAuthorization - Amazon ECS is introducing tagging authorization + // for resource creation. Users must have permissions for actions that create the + // resource, such as ecsCreateCluster . If tags are specified when you create a + // resource, Amazon Web Services performs additional authorization to verify if + // users or roles have permissions to create tags. Therefore, you must grant + // explicit permissions to use the ecs:TagResource action. For more information, + // see [Grant permission to tag resources on creation]in the Amazon ECS Developer Guide. + // + // - guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon + // ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled + // by your security administrator in your Amazon ECS account. Amazon GuardDuty + // controls this account setting on your behalf. For more information, see [Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring]. + // + // [Grant permission to tag resources on creation]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/supported-iam-actions-tagging.html + // [Using a VPC in dual-stack mode]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/fargate-task-networking.html#fargate-task-networking-vpc-dual-stack + // [Amazon Web Services Fargate task maintenance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-maintenance.html + // [Elastic Network Interface Trunking]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-instance-eni.html + // [Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-guard-duty-integration.html + // [CloudWatch Container Insights]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cloudwatch-container-insights.html + // + // This member is required. + Name types.SettingName + + // The account setting value for the specified principal ARN. Accepted values are + // enabled , disabled , on , and off . 
+ // + // When you specify fargateTaskRetirementWaitPeriod for the name , the following + // are the valid values: + // + // - 0 - Amazon Web Services sends the notification, and immediately retires the + // affected tasks. + // + // - 7 - Amazon Web Services sends the notification, and waits 7 calendar days to + // retire the tasks. + // + // - 14 - Amazon Web Services sends the notification, and waits 14 calendar days + // to retire the tasks. + // + // This member is required. + Value *string + + // The ARN of the principal, which can be a user, role, or the root user. If you + // specify the root user, it modifies the account setting for all users, roles, and + // the root user of the account unless a user or role explicitly overrides these + // settings. If this field is omitted, the setting is changed only for the + // authenticated user. + // + // You must use the root user when you set the Fargate wait time ( + // fargateTaskRetirementWaitPeriod ). + // + // Federated users assume the account setting of the root user and can't have + // explicit account settings set for them. + PrincipalArn *string + + noSmithyDocumentSerde +} + +type PutAccountSettingOutput struct { + + // The current account setting for a resource. + Setting *types.Setting + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutAccountSettingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutAccountSetting{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutAccountSetting{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutAccountSetting"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpPutAccountSettingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutAccountSetting(options.Region, ), middleware.Before); err != nil { + return err + } + if err = 
addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutAccountSetting(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutAccountSetting", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_PutAccountSettingDefault.go b/aws-sdk-go-v2/service/ecs/api_op_PutAccountSettingDefault.go new file mode 100644 index 00000000000..27b854b9f5e --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_PutAccountSettingDefault.go @@ -0,0 +1,244 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Modifies an account setting for all users on an account for whom no individual +// account setting has been specified. Account settings are set on a per-Region +// basis. +func (c *Client) PutAccountSettingDefault(ctx context.Context, params *PutAccountSettingDefaultInput, optFns ...func(*Options)) (*PutAccountSettingDefaultOutput, error) { + if params == nil { params = &PutAccountSettingDefaultInput{} } + + result, metadata, err := c.invokeOperation(ctx, "PutAccountSettingDefault", params, optFns, c.addOperationPutAccountSettingDefaultMiddlewares) + if err != nil { return nil, err } + + out := result.(*PutAccountSettingDefaultOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutAccountSettingDefaultInput struct { + + // The resource name for which to modify the account setting. + // + // The following are the valid values for the account setting name. + // + // - serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and + // resource ID format of the resource type for a specified user, role, or the root + // user for an account is affected. The opt-in and opt-out account setting must be + // set for each Amazon ECS resource separately. The ARN and resource ID format of a + // resource is defined by the opt-in status of the user or role that created the + // resource. You must turn on this setting to use Amazon ECS features such as + // resource tagging. + // + // - taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and + // resource ID format of the resource type for a specified user, role, or the root + // user for an account is affected. The opt-in and opt-out account setting must be + // set for each Amazon ECS resource separately. The ARN and resource ID format of a + // resource is defined by the opt-in status of the user or role that created the + // resource. You must turn on this setting to use Amazon ECS features such as + // resource tagging. 
+ // + // - containerInstanceLongArnFormat - When modified, the Amazon Resource Name + // (ARN) and resource ID format of the resource type for a specified user, role, or + // the root user for an account is affected. The opt-in and opt-out account setting + // must be set for each Amazon ECS resource separately. The ARN and resource ID + // format of a resource is defined by the opt-in status of the user or role that + // created the resource. You must turn on this setting to use Amazon ECS features + // such as resource tagging. + // + // - awsvpcTrunking - When modified, the elastic network interface (ENI) limit + // for any new container instances that support the feature is changed. If + // awsvpcTrunking is turned on, any new container instances that support the + // feature are launched with the increased ENI limits available to them. For more + // information, see [Elastic Network Interface Trunking]in the Amazon Elastic Container Service Developer Guide. + // + // - containerInsights - When modified, the default setting indicating whether + // Amazon Web Services CloudWatch Container Insights is turned on for your clusters + // is changed. If containerInsights is turned on, any new clusters that are + // created will have Container Insights turned on unless you disable it during + // cluster creation. For more information, see [CloudWatch Container Insights]in the Amazon Elastic Container + // Service Developer Guide. + // + // - dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your + // tasks using the awsvpc network mode can have an IPv6 address assigned. For + // more information on using IPv6 with tasks launched on Amazon EC2 instances, see [Using a VPC in dual-stack mode] + // . For more information on using IPv6 with tasks launched on Fargate, see [Using a VPC in dual-stack mode]. + // + // - fargateFIPSMode - If you specify fargateFIPSMode , Fargate FIPS 140 + // compliance is affected. + // + // - fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a + // security or infrastructure update is needed for an Amazon ECS task hosted on + // Fargate, the tasks need to be stopped and new tasks launched to replace them. + // Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a + // Fargate task. For information about the Fargate tasks maintenance, see [Amazon Web Services Fargate task maintenance]in the + // Amazon ECS Developer Guide. + // + // - tagResourceAuthorization - Amazon ECS is introducing tagging authorization + // for resource creation. Users must have permissions for actions that create the + // resource, such as ecsCreateCluster . If tags are specified when you create a + // resource, Amazon Web Services performs additional authorization to verify if + // users or roles have permissions to create tags. Therefore, you must grant + // explicit permissions to use the ecs:TagResource action. For more information, + // see [Grant permission to tag resources on creation]in the Amazon ECS Developer Guide. + // + // - guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon + // ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled + // by your security administrator in your Amazon ECS account. Amazon GuardDuty + // controls this account setting on your behalf. For more information, see [Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring].
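The setting names listed above correspond to the values of types.SettingName. As a rough usage sketch (an editorial illustration, not part of the generated file), the snippet below turns on Container Insights as the account-wide default; the package name, helper name, and the "enabled" value are assumptions, and the same Name/Value shape applies to PutAccountSetting when targeting a single principal.

package example

// Illustrative sketch only: set Container Insights as the account default.
// Assumes an *ecs.Client constructed elsewhere (for example with
// ecs.NewFromConfig) with credentials and region already resolved.
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

func enableContainerInsightsDefault(ctx context.Context, client *ecs.Client) error {
	// containerInsights accepts "enabled"/"disabled" (or "on"/"off").
	_, err := client.PutAccountSettingDefault(ctx, &ecs.PutAccountSettingDefaultInput{
		Name:  types.SettingNameContainerInsights,
		Value: aws.String("enabled"),
	})
	return err
}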
+ // + // [Grant permission to tag resources on creation]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/supported-iam-actions-tagging.html + // [Using a VPC in dual-stack mode]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/fargate-task-networking.html#fargate-task-networking-vpc-dual-stack + // [Amazon Web Services Fargate task maintenance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-maintenance.html + // [Elastic Network Interface Trunking]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-instance-eni.html + // [Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-guard-duty-integration.html + // [CloudWatch Container Insights]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cloudwatch-container-insights.html + // + // This member is required. + Name types.SettingName + + // The account setting value for the specified principal ARN. Accepted values are + // enabled , disabled , on , and off . + // + // When you specify fargateTaskRetirementWaitPeriod for the name , the following + // are the valid values: + // + // - 0 - Amazon Web Services sends the notification, and immediately retires the + // affected tasks. + // + // - 7 - Amazon Web Services sends the notification, and waits 7 calendar days to + // retire the tasks. + // + // - 14 - Amazon Web Services sends the notification, and waits 14 calendar days + // to retire the tasks. + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +type PutAccountSettingDefaultOutput struct { + + // The current setting for a resource. + Setting *types.Setting + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutAccountSettingDefaultMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutAccountSettingDefault{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutAccountSettingDefault{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutAccountSettingDefault"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = 
addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpPutAccountSettingDefaultValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutAccountSettingDefault(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutAccountSettingDefault(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutAccountSettingDefault", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_PutAttributes.go b/aws-sdk-go-v2/service/ecs/api_op_PutAttributes.go new file mode 100644 index 00000000000..77ac5477936 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_PutAttributes.go @@ -0,0 +1,163 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Create or update an attribute on an Amazon ECS resource. If the attribute +// doesn't exist, it's created. If the attribute exists, its value is replaced with +// the specified value. To delete an attribute, use [DeleteAttributes]. For more information, see [Attributes] +// in the Amazon Elastic Container Service Developer Guide. +// +// [Attributes]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes +// [DeleteAttributes]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteAttributes.html +func (c *Client) PutAttributes(ctx context.Context, params *PutAttributesInput, optFns ...func(*Options)) (*PutAttributesOutput, error) { + if params == nil { params = &PutAttributesInput{} } + + result, metadata, err := c.invokeOperation(ctx, "PutAttributes", params, optFns, c.addOperationPutAttributesMiddlewares) + if err != nil { return nil, err } + + out := result.(*PutAttributesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutAttributesInput struct { + + // The attributes to apply to your resource. You can specify up to 10 custom + // attributes for each resource. You can specify up to 10 attributes in a single + // call. + // + // This member is required. + Attributes []types.Attribute + + // The short name or full Amazon Resource Name (ARN) of the cluster that contains + // the resource to apply attributes. If you do not specify a cluster, the default + // cluster is assumed. 
+ Cluster *string + + noSmithyDocumentSerde +} + +type PutAttributesOutput struct { + + // The attributes applied to your resource. + Attributes []types.Attribute + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutAttributes{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutAttributes{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpPutAttributesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutAttributes(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutAttributes(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutAttributes", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_PutClusterCapacityProviders.go b/aws-sdk-go-v2/service/ecs/api_op_PutClusterCapacityProviders.go new file mode 100644 index 00000000000..fd9efba4cd3 --- 
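To make the attribute behavior above concrete, here is a small, hedged usage sketch (editorial, not generated code): it applies one custom attribute to a container instance in the default cluster. The cluster name, attribute key and value, target ID, and helper name are placeholders.

package example

// Illustrative sketch only: apply a custom attribute to a container instance.
// The cluster name, attribute, and target ID below are placeholders.
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

func tagContainerInstance(ctx context.Context, client *ecs.Client, containerInstanceID string) error {
	_, err := client.PutAttributes(ctx, &ecs.PutAttributesInput{
		Cluster: aws.String("default"),
		Attributes: []types.Attribute{
			{
				Name:     aws.String("stack"),
				Value:    aws.String("production"),
				TargetId: aws.String(containerInstanceID), // container instance ID or full ARN
			},
		},
	})
	return err
}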
/dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_PutClusterCapacityProviders.go @@ -0,0 +1,210 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Modifies the available capacity providers and the default capacity provider +// strategy for a cluster. +// +// You must specify both the available capacity providers and a default capacity +// provider strategy for the cluster. If the specified cluster has existing +// capacity providers associated with it, you must specify all existing capacity +// providers in addition to any new ones you want to add. Any existing capacity +// providers that are associated with a cluster that are omitted from a [PutClusterCapacityProviders]API call +// will be disassociated with the cluster. You can only disassociate an existing +// capacity provider from a cluster if it's not being used by any existing tasks. +// +// When creating a service or running a task on a cluster, if no capacity provider +// or launch type is specified, then the cluster's default capacity provider +// strategy is used. We recommend that you define a default capacity provider +// strategy for your cluster. However, you must specify an empty array ( [] ) to +// bypass defining a default strategy. +// +// [PutClusterCapacityProviders]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutClusterCapacityProviders.html +func (c *Client) PutClusterCapacityProviders(ctx context.Context, params *PutClusterCapacityProvidersInput, optFns ...func(*Options)) (*PutClusterCapacityProvidersOutput, error) { + if params == nil { params = &PutClusterCapacityProvidersInput{} } + + result, metadata, err := c.invokeOperation(ctx, "PutClusterCapacityProviders", params, optFns, c.addOperationPutClusterCapacityProvidersMiddlewares) + if err != nil { return nil, err } + + out := result.(*PutClusterCapacityProvidersOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutClusterCapacityProvidersInput struct { + + // The name of one or more capacity providers to associate with the cluster. + // + // If specifying a capacity provider that uses an Auto Scaling group, the capacity + // provider must already be created. New capacity providers can be created with the + // [CreateCapacityProvider]API operation. + // + // To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT + // capacity providers. The Fargate capacity providers are available to all accounts + // and only need to be associated with a cluster to be used. + // + // [CreateCapacityProvider]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCapacityProvider.html + // + // This member is required. + CapacityProviders []string + + // The short name or full Amazon Resource Name (ARN) of the cluster to modify the + // capacity provider settings for. If you don't specify a cluster, the default + // cluster is assumed. + // + // This member is required. + Cluster *string + + // The capacity provider strategy to use by default for the cluster. + // + // When creating a service or running a task on a cluster, if no capacity provider + // or launch type is specified then the default capacity provider strategy for the + // cluster is used. 
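Because any capacity provider omitted from this call is disassociated from the cluster, a typical invocation restates the full provider list along with the default strategy. The following hedged sketch (editorial, not generated code) associates the Fargate capacity providers with a cluster; the cluster name, base, and weights are placeholders.

package example

// Illustrative sketch only: associate the Fargate capacity providers with a
// cluster and set a default strategy. This call replaces the cluster's existing
// capacity provider list, so include any providers you want to keep.
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

func useFargateProviders(ctx context.Context, client *ecs.Client) error {
	_, err := client.PutClusterCapacityProviders(ctx, &ecs.PutClusterCapacityProvidersInput{
		Cluster:           aws.String("default"),
		CapacityProviders: []string{"FARGATE", "FARGATE_SPOT"},
		DefaultCapacityProviderStrategy: []types.CapacityProviderStrategyItem{
			{CapacityProvider: aws.String("FARGATE"), Base: 1, Weight: 1},
			{CapacityProvider: aws.String("FARGATE_SPOT"), Weight: 4},
		},
	})
	return err
}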
+ // + // A capacity provider strategy consists of one or more capacity providers along + // with the base and weight to assign to them. A capacity provider must be + // associated with the cluster to be used in a capacity provider strategy. The [PutClusterCapacityProviders]API + // is used to associate a capacity provider with a cluster. Only capacity providers + // with an ACTIVE or UPDATING status can be used. + // + // If specifying a capacity provider that uses an Auto Scaling group, the capacity + // provider must already be created. New capacity providers can be created with the + // [CreateCapacityProvider]API operation. + // + // To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT + // capacity providers. The Fargate capacity providers are available to all accounts + // and only need to be associated with a cluster to be used. + // + // [PutClusterCapacityProviders]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutClusterCapacityProviders.html + // [CreateCapacityProvider]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCapacityProvider.html + // + // This member is required. + DefaultCapacityProviderStrategy []types.CapacityProviderStrategyItem + + noSmithyDocumentSerde +} + +type PutClusterCapacityProvidersOutput struct { + + // Details about the cluster. + Cluster *types.Cluster + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutClusterCapacityProvidersMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutClusterCapacityProviders{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutClusterCapacityProviders{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutClusterCapacityProviders"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpPutClusterCapacityProvidersValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opPutClusterCapacityProviders(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutClusterCapacityProviders(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutClusterCapacityProviders", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_RegisterContainerInstance.go b/aws-sdk-go-v2/service/ecs/api_op_RegisterContainerInstance.go new file mode 100644 index 00000000000..0425b64d6b9 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_RegisterContainerInstance.go @@ -0,0 +1,211 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// This action is only used by the Amazon ECS agent, and it is not intended for +// use outside of the agent. +// +// Registers an EC2 instance into the specified cluster. This instance becomes +// available to place containers on. +func (c *Client) RegisterContainerInstance(ctx context.Context, params *RegisterContainerInstanceInput, optFns ...func(*Options)) (*RegisterContainerInstanceOutput, error) { + if params == nil { params = &RegisterContainerInstanceInput{} } + + result, metadata, err := c.invokeOperation(ctx, "RegisterContainerInstance", params, optFns, c.addOperationRegisterContainerInstanceMiddlewares) + if err != nil { return nil, err } + + out := result.(*RegisterContainerInstanceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RegisterContainerInstanceInput struct { + + // The container instance attributes that this container instance supports. + Attributes []types.Attribute + + ClientToken *string + + // The short name or full Amazon Resource Name (ARN) of the cluster to register + // your container instance with. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string + + // The ARN of the container instance (if it was previously registered). + ContainerInstanceArn *string + + // The instance identity document for the EC2 instance to register. This document + // can be found by running the following command from the instance: curl + // http://169.254.169.254/latest/dynamic/instance-identity/document/ + InstanceIdentityDocument *string + + // The instance identity document signature for the EC2 instance to register. 
This + // signature can be found by running the following command from the instance: curl + // http://169.254.169.254/latest/dynamic/instance-identity/signature/ + InstanceIdentityDocumentSignature *string + + // The devices that are available on the container instance. The only supported + // device type is a GPU. + PlatformDevices []types.PlatformDevice + + // The metadata that you apply to the container instance to help you categorize + // and organize them. Each tag consists of a key and an optional value. You define + // both. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []types.Tag + + // The resources available on the instance. + TotalResources []types.Resource + + // The version information for the Amazon ECS container agent and Docker daemon + // that runs on the container instance. + VersionInfo *types.VersionInfo + + noSmithyDocumentSerde +} + +type RegisterContainerInstanceOutput struct { + + // The container instance that was registered. + ContainerInstance *types.ContainerInstance + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRegisterContainerInstanceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRegisterContainerInstance{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRegisterContainerInstance{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "RegisterContainerInstance"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpRegisterContainerInstanceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterContainerInstance(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRegisterContainerInstance(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RegisterContainerInstance", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_RegisterTaskDefinition.go b/aws-sdk-go-v2/service/ecs/api_op_RegisterTaskDefinition.go new file mode 100644 index 00000000000..42c22e7df18 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_RegisterTaskDefinition.go @@ -0,0 +1,447 @@ +// Code generated by smithy-go-codegen DO NOT 
EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Registers a new task definition from the supplied family and +// containerDefinitions . Optionally, you can add data volumes to your containers +// with the volumes parameter. For more information about task definition +// parameters and defaults, see [Amazon ECS Task Definitions]in the Amazon Elastic Container Service Developer +// Guide. +// +// You can specify a role for your task with the taskRoleArn parameter. When you +// specify a role for a task, its containers can then use the latest versions of +// the CLI or SDKs to make API requests to the Amazon Web Services services that +// are specified in the policy that's associated with the role. For more +// information, see [IAM Roles for Tasks]in the Amazon Elastic Container Service Developer Guide. +// +// You can specify a Docker networking mode for the containers in your task +// definition with the networkMode parameter. If you specify the awsvpc network +// mode, the task is allocated an elastic network interface, and you must specify a +// [NetworkConfiguration]when you create a service or run a task with the task definition. For more +// information, see [Task Networking]in the Amazon Elastic Container Service Developer Guide. +// +// [Amazon ECS Task Definitions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html +// [Task Networking]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html +// [IAM Roles for Tasks]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html +// [NetworkConfiguration]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html +func (c *Client) RegisterTaskDefinition(ctx context.Context, params *RegisterTaskDefinitionInput, optFns ...func(*Options)) (*RegisterTaskDefinitionOutput, error) { + if params == nil { params = &RegisterTaskDefinitionInput{} } + + result, metadata, err := c.invokeOperation(ctx, "RegisterTaskDefinition", params, optFns, c.addOperationRegisterTaskDefinitionMiddlewares) + if err != nil { return nil, err } + + out := result.(*RegisterTaskDefinitionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RegisterTaskDefinitionInput struct { + + // A list of container definitions in JSON format that describe the different + // containers that make up your task. + // + // This member is required. + ContainerDefinitions []types.ContainerDefinition + + // You must specify a family for a task definition. You can use it to track multiple + // versions of the same task definition. The family is used as a name for your + // task definition. Up to 255 letters (uppercase and lowercase), numbers, + // underscores, and hyphens are allowed. + // + // This member is required. + Family *string + + // The number of CPU units used by the task. It can be expressed as an integer + // using CPU units (for example, 1024 ) or as a string using vCPUs (for example, 1 + // vCPU or 1 vcpu ) in a task definition. String values are converted to an integer + // indicating the CPU units when the task definition is registered. + // + // Task-level CPU and memory parameters are ignored for Windows containers. We + // recommend specifying container-level resources for Windows containers.
+ // + // If you're using the EC2 launch type, this field is optional. Supported values + // are between 128 CPU units ( 0.125 vCPUs) and 10240 CPU units ( 10 vCPUs). If + // you do not specify a value, the parameter is ignored. + // + // If you're using the Fargate launch type, this field is required and you must + // use one of the following values, which determines your range of supported values + // for the memory parameter: + // + // The CPU units cannot be less than 1 vCPU when you use Windows containers on + // Fargate. + // + // - 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 + // GB) + // + // - 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 + // GB), 4096 (4 GB) + // + // - 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 + // GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) + // + // - 2048 (2 vCPU) - Available memory values: between 4096 (4 GB) and 16384 (16 GB) in + // increments of 1024 (1 GB) + // + // - 4096 (4 vCPU) - Available memory values: between 8192 (8 GB) and 30720 (30 GB) in + // increments of 1024 (1 GB) + // + // - 8192 (8 vCPU) - Available memory values: between 16 GB and 60 GB in 4 GB increments + // + // This option requires Linux platform 1.4.0 or later. + // + // - 16384 (16 vCPU) - Available memory values: between 32 GB and 120 GB in 8 GB increments + // + // This option requires Linux platform 1.4.0 or later. + Cpu *string + + // The amount of ephemeral storage to allocate for the task. This parameter is + // used to expand the total amount of ephemeral storage available, beyond the + // default amount, for tasks hosted on Fargate. For more information, see [Using data volumes in tasks]in the + // Amazon ECS Developer Guide. + // + // For tasks using the Fargate launch type, the task requires the following + // platforms: + // + // - Linux platform version 1.4.0 or later. + // + // - Windows platform version 1.0.0 or later. + // + // [Using data volumes in tasks]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html + EphemeralStorage *types.EphemeralStorage + + // The Amazon Resource Name (ARN) of the task execution role that grants the + // Amazon ECS container agent permission to make Amazon Web Services API calls on + // your behalf. For information about the required IAM roles for Amazon ECS, see [IAM roles for Amazon ECS]in + // the Amazon Elastic Container Service Developer Guide. + // + // [IAM roles for Amazon ECS]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html + ExecutionRoleArn *string + + // The Elastic Inference accelerators to use for the containers in the task. + InferenceAccelerators []types.InferenceAccelerator + + // The IPC resource namespace to use for the containers in the task. The valid + // values are host , task , or none . If host is specified, then all containers + // within the tasks that specified the host IPC mode on the same container + // instance share the same IPC resources with the host Amazon EC2 instance. If task + // is specified, all containers within the specified task share the same IPC + // resources. If none is specified, then IPC resources within the containers of a + // task are private and not shared with other containers in a task or on the + // container instance. If no value is specified, then the IPC resource namespace + // sharing depends on the Docker daemon setting on the container instance.
+ // + // If the host IPC mode is used, be aware that there is a heightened risk of + // undesired IPC namespace exposure. + // + // If you are setting namespaced kernel parameters using systemControls for the + // containers in the task, the following will apply to your IPC resource namespace. + // For more information, see [System Controls]in the Amazon Elastic Container Service Developer + // Guide. + // + // - For tasks that use the host IPC mode, IPC namespace related systemControls + // are not supported. + // + // - For tasks that use the task IPC mode, IPC namespace related systemControls + // will apply to all containers within a task. + // + // This parameter is not supported for Windows containers or tasks run on Fargate. + // + // [System Controls]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html + IpcMode types.IpcMode + + // The amount of memory (in MiB) used by the task. It can be expressed as an + // integer using MiB (for example , 1024 ) or as a string using GB (for example, + // 1GB or 1 GB ) in a task definition. String values are converted to an integer + // indicating the MiB when the task definition is registered. + // + // Task-level CPU and memory parameters are ignored for Windows containers. We + // recommend specifying container-level resources for Windows containers. + // + // If using the EC2 launch type, this field is optional. + // + // If using the Fargate launch type, this field is required and you must use one + // of the following values. This determines your range of supported values for the + // cpu parameter. + // + // The CPU units cannot be less than 1 vCPU when you use Windows containers on + // Fargate. + // + // - 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) + // + // - 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: + // 512 (.5 vCPU) + // + // - 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 + // GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) + // + // - Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - + // Available cpu values: 2048 (2 vCPU) + // + // - Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - + // Available cpu values: 4096 (4 vCPU) + // + // - Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 + // vCPU) + // + // This option requires Linux platform 1.4.0 or later. + // + // - Between 32 GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 + // vCPU) + // + // This option requires Linux platform 1.4.0 or later. + Memory *string + + // The Docker networking mode to use for the containers in the task. The valid + // values are none , bridge , awsvpc , and host . If no network mode is specified, + // the default is bridge . + // + // For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For + // Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. + // For Amazon ECS tasks on Amazon EC2 Windows instances, the default or awsvpc + // network mode can be used. If the network mode is set to none , you cannot specify port mappings in your + // container definitions, and the task's containers do not have external + // connectivity. The host and awsvpc network modes offer the highest networking + // performance for containers because they use the EC2 network stack instead of the + // virtualized network stack provided by the bridge mode.
+ // + // With the host and awsvpc network modes, exposed container ports are mapped + // directly to the corresponding host port (for the host network mode) or the + // attached elastic network interface port (for the awsvpc network mode), so you + // cannot take advantage of dynamic host port mappings. + // + // When using the host network mode, you should not run containers using the root + // user (UID 0). It is considered best practice to use a non-root user. + // + // If the network mode is awsvpc , the task is allocated an elastic network + // interface, and you must specify a [NetworkConfiguration]value when you create a service or run a task + // with the task definition. For more information, see [Task Networking]in the Amazon Elastic + // Container Service Developer Guide. + // + // If the network mode is host , you cannot run multiple instantiations of the same + // task on a single container instance when port mappings are used. + // + // [Task Networking]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html + // [NetworkConfiguration]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html + NetworkMode types.NetworkMode + + // The process namespace to use for the containers in the task. The valid values + // are host or task . On Fargate for Linux containers, the only valid value is task + // . For example, monitoring sidecars might need pidMode to access information + // about other containers running in the same task. + // + // If host is specified, all containers within the tasks that specified the host + // PID mode on the same container instance share the same process namespace with + // the host Amazon EC2 instance. + // + // If task is specified, all containers within the specified task share the same + // process namespace. + // + // If no value is specified, the default is a private namespace for each container. + // + // If the host PID mode is used, there's a heightened risk of undesired process + // namespace exposure. + // + // This parameter is not supported for Windows containers. + // + // This parameter is only supported for tasks that are hosted on Fargate if the + // tasks are using platform version 1.4.0 or later (Linux). This isn't supported + // for Windows containers on Fargate. + PidMode types.PidMode + + // An array of placement constraint objects to use for the task. You can specify a + // maximum of 10 constraints for each task. This limit includes constraints in the + // task definition and those specified at runtime. + PlacementConstraints []types.TaskDefinitionPlacementConstraint + + // The configuration details for the App Mesh proxy. + // + // For tasks hosted on Amazon EC2 instances, the container instances require at + // least version 1.26.0 of the container agent and at least version 1.26.0-1 of + // the ecs-init package to use a proxy configuration. If your container instances + // are launched from the Amazon ECS-optimized AMI version 20190301 or later, then + // they contain the required versions of the container agent and ecs-init . For + // more information, see [Amazon ECS-optimized AMI versions]in the Amazon Elastic Container Service Developer Guide. + // + // [Amazon ECS-optimized AMI versions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-ami-versions.html + ProxyConfiguration *types.ProxyConfiguration + + // The task launch type that Amazon ECS validates the task definition against. 
A + // client exception is returned if the task definition doesn't validate against the + // compatibilities specified. If no value is specified, the parameter is omitted + // from the response. + RequiresCompatibilities []types.Compatibility + + // The operating system that your tasks definitions run on. A platform family is + // specified only for tasks using the Fargate launch type. + RuntimePlatform *types.RuntimePlatform + + // The metadata that you apply to the task definition to help you categorize and + // organize them. Each tag consists of a key and an optional value. You define both + // of them. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []types.Tag + + // The short name or full Amazon Resource Name (ARN) of the IAM role that + // containers in this task can assume. All containers in this task are granted the + // permissions that are specified in this role. For more information, see [IAM Roles for Tasks]in the + // Amazon Elastic Container Service Developer Guide. + // + // [IAM Roles for Tasks]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html + TaskRoleArn *string + + // A list of volume definitions in JSON format that containers in your task might + // use. + Volumes []types.Volume + + noSmithyDocumentSerde +} + +type RegisterTaskDefinitionOutput struct { + + // The list of tags associated with the task definition. + Tags []types.Tag + + // The full description of the registered task definition. + TaskDefinition *types.TaskDefinition + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRegisterTaskDefinitionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRegisterTaskDefinition{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRegisterTaskDefinition{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "RegisterTaskDefinition"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpRegisterTaskDefinitionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterTaskDefinition(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRegisterTaskDefinition(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RegisterTaskDefinition", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_RunTask.go b/aws-sdk-go-v2/service/ecs/api_op_RunTask.go new file mode 100644 index 00000000000..03cf949d664 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_RunTask.go @@ -0,0 +1,424 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
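Before the RunTask operation that follows, here is a hedged sketch (editorial, not generated code) of registering a minimal Fargate task definition with the parameters documented above; the family name, container image, and the 256 CPU unit / 512 MiB pairing are placeholders chosen from the supported combinations.

package example

// Illustrative sketch only: register a minimal Fargate task definition.
// The family, image, and CPU/memory values below are placeholders.
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

func registerWebTaskDefinition(ctx context.Context, client *ecs.Client) (*types.TaskDefinition, error) {
	out, err := client.RegisterTaskDefinition(ctx, &ecs.RegisterTaskDefinitionInput{
		Family:                  aws.String("web"),
		RequiresCompatibilities: []types.Compatibility{types.CompatibilityFargate},
		NetworkMode:             types.NetworkModeAwsvpc, // required for Fargate
		Cpu:                     aws.String("256"),
		Memory:                  aws.String("512"),
		ContainerDefinitions: []types.ContainerDefinition{
			{
				Name:      aws.String("web"),
				Image:     aws.String("public.ecr.aws/nginx/nginx:latest"),
				Essential: aws.Bool(true),
				PortMappings: []types.PortMapping{
					{ContainerPort: aws.Int32(80), Protocol: types.TransportProtocolTcp},
				},
			},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.TaskDefinition, nil
}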
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Starts a new task using the specified task definition. +// +// On March 21, 2024, a change was made to resolve the task definition revision +// before authorization. When a task definition revision is not specified, +// authorization will occur using the latest revision of a task definition. +// +// Amazon Elastic Inference (EI) is no longer available to customers. +// +// You can allow Amazon ECS to place tasks for you, or you can customize how +// Amazon ECS places tasks using placement constraints and placement strategies. +// For more information, see [Scheduling Tasks]in the Amazon Elastic Container Service Developer +// Guide. +// +// Alternatively, you can use StartTask to use your own scheduler or place tasks +// manually on specific container instances. +// +// You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume +// when creating or updating a service. For more information, see [Amazon EBS volumes]in the Amazon +// Elastic Container Service Developer Guide. +// +// The Amazon ECS API follows an eventual consistency model. This is because of +// the distributed nature of the system supporting the API. This means that the +// result of an API command you run that affects your Amazon ECS resources might +// not be immediately visible to all subsequent commands you run. Keep this in mind +// when you carry out an API command that immediately follows a previous API +// command. +// +// To manage eventual consistency, you can do the following: +// +// - Confirm the state of the resource before you run a command to modify it. +// Run the DescribeTasks command using an exponential backoff algorithm to ensure +// that you allow enough time for the previous command to propagate through the +// system. To do this, run the DescribeTasks command repeatedly, starting with a +// couple of seconds of wait time and increasing gradually up to five minutes of +// wait time. +// +// - Add wait time between subsequent commands, even if the DescribeTasks +// command returns an accurate response. Apply an exponential backoff algorithm +// starting with a couple of seconds of wait time, and increase gradually up to +// about five minutes of wait time. +// +// [Scheduling Tasks]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html +// [Amazon EBS volumes]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ebs-volumes.html#ebs-volume-types +func (c *Client) RunTask(ctx context.Context, params *RunTaskInput, optFns ...func(*Options)) (*RunTaskOutput, error) { + if params == nil { params = &RunTaskInput{} } + + result, metadata, err := c.invokeOperation(ctx, "RunTask", params, optFns, c.addOperationRunTaskMiddlewares) + if err != nil { return nil, err } + + out := result.(*RunTaskOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RunTaskInput struct { + + // The family and revision ( family:revision ) or full ARN of the task definition + // to run. If a revision isn't specified, the latest ACTIVE revision is used. + // + // The full ARN value must match the value that you specified as the Resource of + // the principal's permissions policy. + // + // When you specify a task definition, you must either specify a specific + // revision, or all revisions in the ARN.
+ // + // To specify a specific revision, include the revision number in the ARN. For + // example, to specify revision 2, use + // arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2 . + // + // To specify all revisions, use the wildcard (*) in the ARN. For example, to + // specify all revisions, use + // arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:* . + // + // For more information, see [Policy Resources for Amazon ECS] in the Amazon Elastic Container Service Developer + // Guide. + // + // [Policy Resources for Amazon ECS]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security_iam_service-with-iam.html#security_iam_service-with-iam-id-based-policies-resources + // + // This member is required. + TaskDefinition *string + + // The capacity provider strategy to use for the task. + // + // If a capacityProviderStrategy is specified, the launchType parameter must be + // omitted. If no capacityProviderStrategy or launchType is specified, the + // defaultCapacityProviderStrategy for the cluster is used. + // + // When you use cluster auto scaling, you must specify capacityProviderStrategy + // and not launchType . + // + // A capacity provider strategy may contain a maximum of 6 capacity providers. + CapacityProviderStrategy []types.CapacityProviderStrategyItem + + // An identifier that you provide to ensure the idempotency of the request. It + // must be unique and is case sensitive. Up to 64 characters are allowed. The valid + // characters are characters in the range of 33-126, inclusive. For more + // information, see [Ensuring idempotency]. + // + // [Ensuring idempotency]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/ECS_Idempotency.html + ClientToken *string + + // The short name or full Amazon Resource Name (ARN) of the cluster to run your + // task on. If you do not specify a cluster, the default cluster is assumed. + Cluster *string + + // The number of instantiations of the specified task to place on your cluster. + // You can specify up to 10 tasks for each call. + Count *int32 + + // Specifies whether to use Amazon ECS managed tags for the task. For more + // information, see [Tagging Your Amazon ECS Resources]in the Amazon Elastic Container Service Developer Guide. + // + // [Tagging Your Amazon ECS Resources]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html + EnableECSManagedTags bool + + // Determines whether to use the execute command functionality for the containers + // in this task. If true , this enables execute command functionality on all + // containers in the task. + // + // If true , then the task definition must have a task role, or you must provide + // one as an override. + EnableExecuteCommand bool + + // The name of the task group to associate with the task. The default value is the + // family name of the task definition (for example, family:my-family-name ). + Group *string + + // The infrastructure to run your standalone task on. For more information, see [Amazon ECS launch types] + // in the Amazon Elastic Container Service Developer Guide. + // + // The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure. + // + // Fargate Spot infrastructure is available for use but a capacity provider + // strategy must be used. For more information, see [Fargate capacity providers]in the Amazon ECS Developer + // Guide. + // + // The EC2 launch type runs your tasks on Amazon EC2 instances registered to your + // cluster. 
+ // + // The EXTERNAL launch type runs your tasks on your on-premises server or virtual + // machine (VM) capacity registered to your cluster. + // + // A task can use either a launch type or a capacity provider strategy. If a + // launchType is specified, the capacityProviderStrategy parameter must be omitted. + // + // When you use cluster auto scaling, you must specify capacityProviderStrategy + // and not launchType . + // + // [Amazon ECS launch types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + // [Fargate capacity providers]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/fargate-capacity-providers.html + LaunchType types.LaunchType + + // The network configuration for the task. This parameter is required for task + // definitions that use the awsvpc network mode to receive their own elastic + // network interface, and it isn't supported for other network modes. For more + // information, see [Task networking]in the Amazon Elastic Container Service Developer Guide. + // + // [Task networking]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html + NetworkConfiguration *types.NetworkConfiguration + + // A list of container overrides in JSON format that specify the name of a + // container in the specified task definition and the overrides it should receive. + // You can override the default command for a container (that's specified in the + // task definition or Docker image) with a command override. You can also override + // existing environment variables (that are specified in the task definition or + // Docker image) on a container or add new environment variables to it with an + // environment override. + // + // A total of 8192 characters are allowed for overrides. This limit includes the + // JSON formatting characters of the override structure. + Overrides *types.TaskOverride + + // An array of placement constraint objects to use for the task. You can specify + // up to 10 constraints for each task (including constraints in the task definition + // and those specified at runtime). + PlacementConstraints []types.PlacementConstraint + + // The placement strategy objects to use for the task. You can specify a maximum + // of 5 strategy rules for each task. + PlacementStrategy []types.PlacementStrategy + + // The platform version the task uses. A platform version is only specified for + // tasks hosted on Fargate. If one isn't specified, the LATEST platform version is + // used. For more information, see [Fargate platform versions]in the Amazon Elastic Container Service + // Developer Guide. + // + // [Fargate platform versions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html + PlatformVersion *string + + // Specifies whether to propagate the tags from the task definition to the task. + // If no value is specified, the tags aren't propagated. Tags can only be + // propagated to the task during task creation. To add tags to a task after task + // creation, use the[TagResource] API action. + // + // An error will be received if you specify the SERVICE option when running a task. + // + // [TagResource]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html + PropagateTags types.PropagateTags + + // This parameter is only used by Amazon ECS. It is not intended for use by + // customers. + ReferenceId *string + + // An optional tag specified when a task is started. 
For example, if you
+ // automatically trigger a task to run a batch process job, you could apply a
+ // unique identifier for that job to your task with the startedBy parameter. You
+ // can then identify which tasks belong to that job by filtering the results of a [ListTasks]
+ // call with the startedBy value. Up to 128 letters (uppercase and lowercase),
+ // numbers, hyphens (-), forward slash (/), and underscores (_) are allowed.
+ //
+ // If a task is started by an Amazon ECS service, then the startedBy parameter
+ // contains the deployment ID of the service that starts it.
+ //
+ // [ListTasks]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListTasks.html
+ StartedBy *string
+
+ // The metadata that you apply to the task to help you categorize and organize
+ // them. Each tag consists of a key and an optional value, both of which you
+ // define.
+ //
+ // The following basic restrictions apply to tags:
+ //
+ // - Maximum number of tags per resource - 50
+ //
+ // - For each resource, each tag key must be unique, and each tag key can have
+ // only one value.
+ //
+ // - Maximum key length - 128 Unicode characters in UTF-8
+ //
+ // - Maximum value length - 256 Unicode characters in UTF-8
+ //
+ // - If your tagging schema is used across multiple services and resources,
+ // remember that other services may have restrictions on allowed characters.
+ // Generally allowed characters are: letters, numbers, and spaces representable in
+ // UTF-8, and the following characters: + - = . _ : / @.
+ //
+ // - Tag keys and values are case-sensitive.
+ //
+ // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a
+ // prefix for either keys or values as it is reserved for Amazon Web Services use.
+ // You cannot edit or delete tag keys or values with this prefix. Tags with this
+ // prefix do not count against your tags per resource limit.
+ Tags []types.Tag
+
+ // The details of the volume that was configuredAtLaunch . You can configure the
+ // size, volumeType, IOPS, throughput, snapshot and encryption in [TaskManagedEBSVolumeConfiguration]. The name of
+ // the volume must match the name from the task definition.
+ //
+ // [TaskManagedEBSVolumeConfiguration]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TaskManagedEBSVolumeConfiguration.html
+ VolumeConfigurations []types.TaskVolumeConfiguration
+
+ noSmithyDocumentSerde
+}
+
+type RunTaskOutput struct {
+
+ // Any failures associated with the call.
+ //
+ // For information about how to address failures, see [Service event messages] and [API failure reasons] in the Amazon Elastic
+ // Container Service Developer Guide.
+ //
+ // [API failure reasons]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html
+ // [Service event messages]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-event-messages.html#service-event-messages-list
+ Failures []types.Failure
+
+ // A full description of the tasks that were run. The tasks that were successfully
+ // placed on your cluster are described here.
+ Tasks []types.Task
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRunTaskMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRunTask{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRunTask{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "RunTask"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opRunTaskMiddleware(stack, options); err != nil { + return err + } + if err = addOpRunTaskValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRunTask(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpRunTask struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpRunTask) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpRunTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*RunTaskInput) + if !ok { return out, metadata, 
fmt.Errorf("expected middleware input to be of type *RunTaskInput ")} + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { return out, metadata, err } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opRunTaskMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpRunTask{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opRunTask(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RunTask", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_StartTask.go b/aws-sdk-go-v2/service/ecs/api_op_StartTask.go new file mode 100644 index 00000000000..710d9b386f7 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_StartTask.go @@ -0,0 +1,270 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Starts a new task from the specified task definition on the specified container +// instance or instances. +// +// On March 21, 2024, a change was made to resolve the task definition revision +// before authorization. When a task definition revision is not specified, +// authorization will occur using the latest revision of a task definition. +// +// Amazon Elastic Inference (EI) is no longer available to customers. +// +// Alternatively, you can use RunTask to place tasks for you. For more +// information, see [Scheduling Tasks]in the Amazon Elastic Container Service Developer Guide. +// +// You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume +// when creating or updating a service. For more infomation, see [Amazon EBS volumes]in the Amazon +// Elastic Container Service Developer Guide. +// +// [Scheduling Tasks]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html +// [Amazon EBS volumes]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ebs-volumes.html#ebs-volume-types +func (c *Client) StartTask(ctx context.Context, params *StartTaskInput, optFns ...func(*Options)) (*StartTaskOutput, error) { + if params == nil { params = &StartTaskInput{} } + + result, metadata, err := c.invokeOperation(ctx, "StartTask", params, optFns, c.addOperationStartTaskMiddlewares) + if err != nil { return nil, err } + + out := result.(*StartTaskOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartTaskInput struct { + + // The container instance IDs or full ARN entries for the container instances + // where you would like to place your task. You can specify up to 10 container + // instances. + // + // This member is required. + ContainerInstances []string + + // The family and revision ( family:revision ) or full ARN of the task definition + // to start. If a revision isn't specified, the latest ACTIVE revision is used. + // + // This member is required. + TaskDefinition *string + + // The short name or full Amazon Resource Name (ARN) of the cluster where to start + // your task. If you do not specify a cluster, the default cluster is assumed. + Cluster *string + + // Specifies whether to use Amazon ECS managed tags for the task. 
For more + // information, see [Tagging Your Amazon ECS Resources]in the Amazon Elastic Container Service Developer Guide. + // + // [Tagging Your Amazon ECS Resources]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html + EnableECSManagedTags bool + + // Whether or not the execute command functionality is turned on for the task. If + // true , this turns on the execute command functionality on all containers in the + // task. + EnableExecuteCommand bool + + // The name of the task group to associate with the task. The default value is the + // family name of the task definition (for example, family:my-family-name). + Group *string + + // The VPC subnet and security group configuration for tasks that receive their + // own elastic network interface by using the awsvpc networking mode. + NetworkConfiguration *types.NetworkConfiguration + + // A list of container overrides in JSON format that specify the name of a + // container in the specified task definition and the overrides it receives. You + // can override the default command for a container (that's specified in the task + // definition or Docker image) with a command override. You can also override + // existing environment variables (that are specified in the task definition or + // Docker image) on a container or add new environment variables to it with an + // environment override. + // + // A total of 8192 characters are allowed for overrides. This limit includes the + // JSON formatting characters of the override structure. + Overrides *types.TaskOverride + + // Specifies whether to propagate the tags from the task definition or the service + // to the task. If no value is specified, the tags aren't propagated. + PropagateTags types.PropagateTags + + // This parameter is only used by Amazon ECS. It is not intended for use by + // customers. + ReferenceId *string + + // An optional tag specified when a task is started. For example, if you + // automatically trigger a task to run a batch process job, you could apply a + // unique identifier for that job to your task with the startedBy parameter. You + // can then identify which tasks belong to that job by filtering the results of a [ListTasks] + // call with the startedBy value. Up to 36 letters (uppercase and lowercase), + // numbers, hyphens (-), forward slash (/), and underscores (_) are allowed. + // + // If a task is started by an Amazon ECS service, the startedBy parameter contains + // the deployment ID of the service that starts it. + // + // [ListTasks]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListTasks.html + StartedBy *string + + // The metadata that you apply to the task to help you categorize and organize + // them. Each tag consists of a key and an optional value, both of which you + // define. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. 
+ // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []types.Tag + + // The details of the volume that was configuredAtLaunch . You can configure the + // size, volumeType, IOPS, throughput, snapshot and encryption in [TaskManagedEBSVolumeConfiguration]. The name of + // the volume must match the name from the task definition. + // + // [TaskManagedEBSVolumeConfiguration]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TaskManagedEBSVolumeConfiguration.html + VolumeConfigurations []types.TaskVolumeConfiguration + + noSmithyDocumentSerde +} + +type StartTaskOutput struct { + + // Any failures associated with the call. + Failures []types.Failure + + // A full description of the tasks that were started. Each task that was + // successfully placed on your container instances is described. + Tasks []types.Task + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartTaskMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartTask{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartTask{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "StartTask"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpStartTaskValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartTask(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + 
if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartTask(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StartTask", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_StopTask.go b/aws-sdk-go-v2/service/ecs/api_op_StopTask.go new file mode 100644 index 00000000000..d90dec3317b --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_StopTask.go @@ -0,0 +1,180 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Stops a running task. Any tags associated with the task will be deleted. +// +// When you call StopTask on a task, the equivalent of docker stop is issued to +// the containers running in the task. This results in a SIGTERM value and a +// default 30-second timeout, after which the SIGKILL value is sent and the +// containers are forcibly stopped. If the container handles the SIGTERM value +// gracefully and exits within 30 seconds from receiving it, no SIGKILL value is +// sent. +// +// For Windows containers, POSIX signals do not work and runtime stops the +// container by sending a CTRL_SHUTDOWN_EVENT . For more information, see [Unable to react to graceful shutdown of (Windows) container #25982] on +// GitHub. +// +// The default 30-second timeout can be configured on the Amazon ECS container +// agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see [Amazon ECS Container Agent Configuration] +// in the Amazon Elastic Container Service Developer Guide. +// +// [Unable to react to graceful shutdown of (Windows) container #25982]: https://github.com/moby/moby/issues/25982 +// [Amazon ECS Container Agent Configuration]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html +func (c *Client) StopTask(ctx context.Context, params *StopTaskInput, optFns ...func(*Options)) (*StopTaskOutput, error) { + if params == nil { params = &StopTaskInput{} } + + result, metadata, err := c.invokeOperation(ctx, "StopTask", params, optFns, c.addOperationStopTaskMiddlewares) + if err != nil { return nil, err } + + out := result.(*StopTaskOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StopTaskInput struct { + + // The task ID of the task to stop. + // + // This member is required. + Task *string + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // task to stop. If you do not specify a cluster, the default cluster is assumed. + Cluster *string + + // An optional message specified when a task is stopped. For example, if you're + // using a custom scheduler, you can use this parameter to specify the reason for + // stopping the task here, and the message appears in subsequent [DescribeTasks]> API operations + // on this task. 
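+ //
+ // Illustrative sketch (editorial addition, not generated code): a call from a
+ // consuming package that sets a stop reason, assuming a configured *ecs.Client
+ // named client, the aws.String helper from github.com/aws/aws-sdk-go-v2/aws,
+ // and a hypothetical task ID:
+ //
+ //    out, err := client.StopTask(ctx, &ecs.StopTaskInput{
+ //        Task:   aws.String("example-task-id"),
+ //        Reason: aws.String("stopped by deployment rollback"),
+ //    })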
+ // + // [DescribeTasks]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTasks.html + Reason *string + + noSmithyDocumentSerde +} + +type StopTaskOutput struct { + + // The task that was stopped. + Task *types.Task + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStopTaskMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpStopTask{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStopTask{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "StopTask"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpStopTaskValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStopTask(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStopTask(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StopTask", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_SubmitAttachmentStateChanges.go b/aws-sdk-go-v2/service/ecs/api_op_SubmitAttachmentStateChanges.go new file mode 100644 index 
00000000000..afa535493f4 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_SubmitAttachmentStateChanges.go @@ -0,0 +1,157 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// This action is only used by the Amazon ECS agent, and it is not intended for +// use outside of the agent. +// +// Sent to acknowledge that an attachment changed states. +func (c *Client) SubmitAttachmentStateChanges(ctx context.Context, params *SubmitAttachmentStateChangesInput, optFns ...func(*Options)) (*SubmitAttachmentStateChangesOutput, error) { + if params == nil { params = &SubmitAttachmentStateChangesInput{} } + + result, metadata, err := c.invokeOperation(ctx, "SubmitAttachmentStateChanges", params, optFns, c.addOperationSubmitAttachmentStateChangesMiddlewares) + if err != nil { return nil, err } + + out := result.(*SubmitAttachmentStateChangesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type SubmitAttachmentStateChangesInput struct { + + // Any attachments associated with the state change request. + // + // This member is required. + Attachments []types.AttachmentStateChange + + // The short name or full ARN of the cluster that hosts the container instance the + // attachment belongs to. + Cluster *string + + noSmithyDocumentSerde +} + +type SubmitAttachmentStateChangesOutput struct { + + // Acknowledgement of the state change. + Acknowledgment *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationSubmitAttachmentStateChangesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpSubmitAttachmentStateChanges{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpSubmitAttachmentStateChanges{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "SubmitAttachmentStateChanges"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + 
return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpSubmitAttachmentStateChangesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSubmitAttachmentStateChanges(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opSubmitAttachmentStateChanges(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "SubmitAttachmentStateChanges", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_SubmitContainerStateChange.go b/aws-sdk-go-v2/service/ecs/api_op_SubmitContainerStateChange.go new file mode 100644 index 00000000000..a1bc5e5a444 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_SubmitContainerStateChange.go @@ -0,0 +1,170 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// This action is only used by the Amazon ECS agent, and it is not intended for +// use outside of the agent. +// +// Sent to acknowledge that a container changed states. +func (c *Client) SubmitContainerStateChange(ctx context.Context, params *SubmitContainerStateChangeInput, optFns ...func(*Options)) (*SubmitContainerStateChangeOutput, error) { + if params == nil { params = &SubmitContainerStateChangeInput{} } + + result, metadata, err := c.invokeOperation(ctx, "SubmitContainerStateChange", params, optFns, c.addOperationSubmitContainerStateChangeMiddlewares) + if err != nil { return nil, err } + + out := result.(*SubmitContainerStateChangeOutput) + out.ResultMetadata = metadata + return out, nil +} + +type SubmitContainerStateChangeInput struct { + + // The short name or full ARN of the cluster that hosts the container. + Cluster *string + + // The name of the container. + ContainerName *string + + // The exit code that's returned for the state change request. + ExitCode *int32 + + // The network bindings of the container. + NetworkBindings []types.NetworkBinding + + // The reason for the state change request. + Reason *string + + // The ID of the Docker container. + RuntimeId *string + + // The status of the state change request. + Status *string + + // The task ID or full Amazon Resource Name (ARN) of the task that hosts the + // container. + Task *string + + noSmithyDocumentSerde +} + +type SubmitContainerStateChangeOutput struct { + + // Acknowledgement of the state change. 
+ Acknowledgment *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationSubmitContainerStateChangeMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpSubmitContainerStateChange{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpSubmitContainerStateChange{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "SubmitContainerStateChange"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSubmitContainerStateChange(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opSubmitContainerStateChange(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "SubmitContainerStateChange", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_SubmitTaskStateChange.go b/aws-sdk-go-v2/service/ecs/api_op_SubmitTaskStateChange.go new file mode 100644 index 00000000000..a5647abd377 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_SubmitTaskStateChange.go @@ -0,0 +1,180 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+
+
+package ecs
+
+import (
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+ "github.com/aws/aws-sdk-go-v2/service/ecs/types"
+)
+
+// This action is only used by the Amazon ECS agent, and it is not intended for
+// use outside of the agent.
+//
+// Sent to acknowledge that a task changed states.
+func (c *Client) SubmitTaskStateChange(ctx context.Context, params *SubmitTaskStateChangeInput, optFns ...func(*Options)) (*SubmitTaskStateChangeOutput, error) {
+ if params == nil { params = &SubmitTaskStateChangeInput{} }
+
+ result, metadata, err := c.invokeOperation(ctx, "SubmitTaskStateChange", params, optFns, c.addOperationSubmitTaskStateChangeMiddlewares)
+ if err != nil { return nil, err }
+
+ out := result.(*SubmitTaskStateChangeOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type SubmitTaskStateChangeInput struct {
+
+ // Any attachments associated with the state change request.
+ Attachments []types.AttachmentStateChange
+
+ // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the
+ // task.
+ Cluster *string
+
+ // Any containers that are associated with the state change request.
+ Containers []types.ContainerStateChange
+
+ // The Unix timestamp for the time when the task execution stopped.
+ ExecutionStoppedAt *time.Time
+
+ // The details for the managed agent that's associated with the task.
+ ManagedAgents []types.ManagedAgentStateChange
+
+ // The Unix timestamp for the time when the container image pull started.
+ PullStartedAt *time.Time
+
+ // The Unix timestamp for the time when the container image pull completed.
+ PullStoppedAt *time.Time
+
+ // The reason for the state change request.
+ Reason *string
+
+ // The status of the state change request.
+ Status *string
+
+ // The task ID or full ARN of the task in the state change request.
+ Task *string
+
+ noSmithyDocumentSerde
+}
+
+type SubmitTaskStateChangeOutput struct {
+
+ // Acknowledgement of the state change.
+ Acknowledgment *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationSubmitTaskStateChangeMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpSubmitTaskStateChange{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpSubmitTaskStateChange{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "SubmitTaskStateChange"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpSubmitTaskStateChangeValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSubmitTaskStateChange(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opSubmitTaskStateChange(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "SubmitTaskStateChange", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_TagResource.go b/aws-sdk-go-v2/service/ecs/api_op_TagResource.go new file mode 100644 index 00000000000..e0887ee9d77 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_TagResource.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
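+//
+// Editorial usage sketch (not part of the generated code): tagging an ECS
+// resource from a consuming package, assuming a configured aws.Config named cfg,
+// the aws.String helper, the ecs/types package, and a hypothetical cluster ARN:
+//
+//    client := ecs.NewFromConfig(cfg)
+//    _, err := client.TagResource(ctx, &ecs.TagResourceInput{
+//        ResourceArn: aws.String("arn:aws:ecs:us-east-1:111122223333:cluster/example-cluster"),
+//        Tags: []types.Tag{
+//            {Key: aws.String("team"), Value: aws.String("platform")},
+//        },
+//    })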
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Associates the specified tags to a resource with the specified resourceArn . If +// existing tags on a resource aren't specified in the request parameters, they +// aren't changed. When a resource is deleted, the tags that are associated with +// that resource are deleted as well. +func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { params = &TagResourceInput{} } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { return nil, err } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TagResourceInput struct { + + // The Amazon Resource Name (ARN) of the resource to add tags to. Currently, the + // supported resources are Amazon ECS capacity providers, tasks, services, task + // definitions, clusters, and container instances. + // + // This member is required. + ResourceArn *string + + // The tags to add to the resource. A tag is an array of key-value pairs. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + // + // This member is required. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type TagResourceOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpTagResource{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpTagResource{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "TagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "TagResource", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_UntagResource.go b/aws-sdk-go-v2/service/ecs/api_op_UntagResource.go new file mode 100644 index 00000000000..f8b1f00b6ad --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_UntagResource.go @@ -0,0 +1,152 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
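+//
+// Editorial usage sketch (not part of the generated code): removing tag keys
+// from an ECS resource, assuming an existing *ecs.Client named client, the
+// aws.String helper, and a hypothetical service ARN:
+//
+//    _, err := client.UntagResource(ctx, &ecs.UntagResourceInput{
+//        ResourceArn: aws.String("arn:aws:ecs:us-east-1:111122223333:service/example-cluster/example-service"),
+//        TagKeys:     []string{"team", "environment"},
+//    })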
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes specified tags from a resource. +func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { params = &UntagResourceInput{} } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { return nil, err } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UntagResourceInput struct { + + // The Amazon Resource Name (ARN) of the resource to delete tags from. Currently, + // the supported resources are Amazon ECS capacity providers, tasks, services, task + // definitions, clusters, and container instances. + // + // This member is required. + ResourceArn *string + + // The keys of the tags to be removed. + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +type UntagResourceOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUntagResource{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUntagResource{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UntagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != 
nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UntagResource", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_UpdateCapacityProvider.go b/aws-sdk-go-v2/service/ecs/api_op_UpdateCapacityProvider.go new file mode 100644 index 00000000000..775dcc6d2d0 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_UpdateCapacityProvider.go @@ -0,0 +1,156 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Modifies the parameters for a capacity provider. +func (c *Client) UpdateCapacityProvider(ctx context.Context, params *UpdateCapacityProviderInput, optFns ...func(*Options)) (*UpdateCapacityProviderOutput, error) { + if params == nil { params = &UpdateCapacityProviderInput{} } + + result, metadata, err := c.invokeOperation(ctx, "UpdateCapacityProvider", params, optFns, c.addOperationUpdateCapacityProviderMiddlewares) + if err != nil { return nil, err } + + out := result.(*UpdateCapacityProviderOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateCapacityProviderInput struct { + + // An object that represent the parameters to update for the Auto Scaling group + // capacity provider. + // + // This member is required. + AutoScalingGroupProvider *types.AutoScalingGroupProviderUpdate + + // The name of the capacity provider to update. + // + // This member is required. + Name *string + + noSmithyDocumentSerde +} + +type UpdateCapacityProviderOutput struct { + + // Details about the capacity provider. + CapacityProvider *types.CapacityProvider + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateCapacityProviderMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateCapacityProvider{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateCapacityProvider{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateCapacityProvider"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateCapacityProviderValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateCapacityProvider(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateCapacityProvider(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateCapacityProvider", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_UpdateCluster.go b/aws-sdk-go-v2/service/ecs/api_op_UpdateCluster.go new file mode 100644 index 00000000000..66d356178bb --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_UpdateCluster.go @@ -0,0 +1,174 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
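// Illustrative usage sketch: one way a caller might invoke the
// UpdateCapacityProvider operation shown above through this generated
// client. Assumes default credential/region resolution via
// config.LoadDefaultConfig; the capacity provider name and target
// capacity are placeholders, not values from the patch.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

func main() {
	ctx := context.Background()

	// Resolve credentials and region from the environment / shared config files.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := ecs.NewFromConfig(cfg)

	// Name and AutoScalingGroupProvider are both required by the input shape.
	out, err := client.UpdateCapacityProvider(ctx, &ecs.UpdateCapacityProviderInput{
		Name: aws.String("example-capacity-provider"), // placeholder name
		AutoScalingGroupProvider: &types.AutoScalingGroupProviderUpdate{
			ManagedScaling: &types.ManagedScaling{
				Status:         types.ManagedScalingStatusEnabled,
				TargetCapacity: aws.Int32(100),
			},
		},
	})
	if err != nil {
		log.Fatalf("UpdateCapacityProvider: %v", err)
	}
	if out.CapacityProvider != nil {
		log.Printf("updated capacity provider: %s", aws.ToString(out.CapacityProvider.CapacityProviderArn))
	}
}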
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Updates the cluster. +func (c *Client) UpdateCluster(ctx context.Context, params *UpdateClusterInput, optFns ...func(*Options)) (*UpdateClusterOutput, error) { + if params == nil { params = &UpdateClusterInput{} } + + result, metadata, err := c.invokeOperation(ctx, "UpdateCluster", params, optFns, c.addOperationUpdateClusterMiddlewares) + if err != nil { return nil, err } + + out := result.(*UpdateClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateClusterInput struct { + + // The name of the cluster to modify the settings for. + // + // This member is required. + Cluster *string + + // The execute command configuration for the cluster. + Configuration *types.ClusterConfiguration + + // Use this parameter to set a default Service Connect namespace. After you set a + // default Service Connect namespace, any new services with Service Connect turned + // on that are created in the cluster are added as client services in the + // namespace. This setting only applies to new services that set the enabled + // parameter to true in the ServiceConnectConfiguration . You can set the namespace + // of each service individually in the ServiceConnectConfiguration to override + // this default parameter. + // + // Tasks that run in a namespace can use short names to connect to services in the + // namespace. Tasks can connect to services across all of the clusters in the + // namespace. Tasks connect through a managed proxy container that collects logs + // and metrics for increased visibility. Only the tasks that Amazon ECS services + // create are supported with Service Connect. For more information, see [Service Connect]in the + // Amazon Elastic Container Service Developer Guide. + // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + ServiceConnectDefaults *types.ClusterServiceConnectDefaultsRequest + + // The cluster settings for your cluster. + Settings []types.ClusterSetting + + noSmithyDocumentSerde +} + +type UpdateClusterOutput struct { + + // Details about the cluster. + Cluster *types.Cluster + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateCluster{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateCluster{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateCluster"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateClusterValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateCluster(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateCluster", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_UpdateClusterSettings.go b/aws-sdk-go-v2/service/ecs/api_op_UpdateClusterSettings.go new file mode 100644 index 00000000000..11f449ed01b --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_UpdateClusterSettings.go @@ -0,0 +1,166 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
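// Illustrative sketch: setting a default Service Connect namespace on an
// existing cluster with the UpdateCluster operation documented above. The
// client is assumed to be built with ecs.NewFromConfig as in the earlier
// sketch; the cluster and namespace names are placeholders.
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

func setServiceConnectDefaults(ctx context.Context, client *ecs.Client) (*types.Cluster, error) {
	out, err := client.UpdateCluster(ctx, &ecs.UpdateClusterInput{
		Cluster: aws.String("example-cluster"), // placeholder cluster name
		ServiceConnectDefaults: &types.ClusterServiceConnectDefaultsRequest{
			// New Service Connect services in this cluster join this namespace by default.
			Namespace: aws.String("example-namespace"), // placeholder namespace
		},
	})
	if err != nil {
		return nil, err
	}
	return out.Cluster, nil
}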
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Modifies the settings to use for a cluster. +func (c *Client) UpdateClusterSettings(ctx context.Context, params *UpdateClusterSettingsInput, optFns ...func(*Options)) (*UpdateClusterSettingsOutput, error) { + if params == nil { params = &UpdateClusterSettingsInput{} } + + result, metadata, err := c.invokeOperation(ctx, "UpdateClusterSettings", params, optFns, c.addOperationUpdateClusterSettingsMiddlewares) + if err != nil { return nil, err } + + out := result.(*UpdateClusterSettingsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateClusterSettingsInput struct { + + // The name of the cluster to modify the settings for. + // + // This member is required. + Cluster *string + + // The setting to use by default for a cluster. This parameter is used to turn on + // CloudWatch Container Insights for a cluster. If this value is specified, it + // overrides the containerInsights value set with [PutAccountSetting] or [PutAccountSettingDefault]. + // + // Currently, if you delete an existing cluster that does not have Container + // Insights turned on, and then create a new cluster with the same name with + // Container Insights tuned on, Container Insights will not actually be turned on. + // If you want to preserve the same name for your existing cluster and turn on + // Container Insights, you must wait 7 days before you can re-create it. + // + // [PutAccountSettingDefault]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html + // [PutAccountSetting]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html + // + // This member is required. + Settings []types.ClusterSetting + + noSmithyDocumentSerde +} + +type UpdateClusterSettingsOutput struct { + + // Details about the cluster + Cluster *types.Cluster + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateClusterSettingsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateClusterSettings{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateClusterSettings{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateClusterSettings"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateClusterSettingsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateClusterSettings(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateClusterSettings(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateClusterSettings", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerAgent.go b/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerAgent.go new file mode 100644 index 00000000000..2a8ab4b080c --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerAgent.go @@ -0,0 +1,177 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
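// Illustrative sketch: turning on CloudWatch Container Insights for a
// cluster with UpdateClusterSettings, overriding the account-level
// containerInsights setting as described above. The cluster name is a
// placeholder; the client is assumed to come from ecs.NewFromConfig.
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

func enableContainerInsights(ctx context.Context, client *ecs.Client) error {
	_, err := client.UpdateClusterSettings(ctx, &ecs.UpdateClusterSettingsInput{
		Cluster: aws.String("example-cluster"), // placeholder cluster name
		Settings: []types.ClusterSetting{
			{
				Name:  types.ClusterSettingNameContainerInsights,
				Value: aws.String("enabled"),
			},
		},
	})
	return err
}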
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Updates the Amazon ECS container agent on a specified container instance. +// Updating the Amazon ECS container agent doesn't interrupt running tasks or +// services on the container instance. The process for updating the agent differs +// depending on whether your container instance was launched with the Amazon +// ECS-optimized AMI or another operating system. +// +// The UpdateContainerAgent API isn't supported for container instances using the +// Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent, +// you can update the ecs-init package. This updates the agent. For more +// information, see [Updating the Amazon ECS container agent]in the Amazon Elastic Container Service Developer Guide. +// +// Agent updates with the UpdateContainerAgent API operation do not apply to +// Windows container instances. We recommend that you launch new container +// instances to update the agent version in your Windows clusters. +// +// The UpdateContainerAgent API requires an Amazon ECS-optimized AMI or Amazon +// Linux AMI with the ecs-init service installed and running. For help updating +// the Amazon ECS container agent on other operating systems, see [Manually updating the Amazon ECS container agent]in the Amazon +// Elastic Container Service Developer Guide. +// +// [Updating the Amazon ECS container agent]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/agent-update-ecs-ami.html +// [Manually updating the Amazon ECS container agent]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html#manually_update_agent +func (c *Client) UpdateContainerAgent(ctx context.Context, params *UpdateContainerAgentInput, optFns ...func(*Options)) (*UpdateContainerAgentOutput, error) { + if params == nil { params = &UpdateContainerAgentInput{} } + + result, metadata, err := c.invokeOperation(ctx, "UpdateContainerAgent", params, optFns, c.addOperationUpdateContainerAgentMiddlewares) + if err != nil { return nil, err } + + out := result.(*UpdateContainerAgentOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateContainerAgentInput struct { + + // The container instance ID or full ARN entries for the container instance where + // you would like to update the Amazon ECS container agent. + // + // This member is required. + ContainerInstance *string + + // The short name or full Amazon Resource Name (ARN) of the cluster that your + // container instance is running on. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string + + noSmithyDocumentSerde +} + +type UpdateContainerAgentOutput struct { + + // The container instance that the container agent was updated for. + ContainerInstance *types.ContainerInstance + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateContainerAgentMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateContainerAgent{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateContainerAgent{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateContainerAgent"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateContainerAgentValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateContainerAgent(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateContainerAgent(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateContainerAgent", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerInstancesState.go b/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerInstancesState.go new file mode 100644 index 00000000000..8b0d2d116af --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerInstancesState.go @@ -0,0 +1,218 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
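// Illustrative sketch: requesting an agent update on one container instance
// with UpdateContainerAgent, subject to the constraints described above
// (Amazon ECS-optimized or ecs-init-based Linux instances only). The cluster
// name and container instance ARN are placeholders; the client is assumed to
// come from ecs.NewFromConfig.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func updateAgent(ctx context.Context, client *ecs.Client) error {
	out, err := client.UpdateContainerAgent(ctx, &ecs.UpdateContainerAgentInput{
		ContainerInstance: aws.String("arn:aws:ecs:us-west-2:111122223333:container-instance/example-cluster/1234abcd"), // placeholder ARN
		Cluster:           aws.String("example-cluster"),                                                                // placeholder cluster name
	})
	if err != nil {
		return err
	}
	if out.ContainerInstance != nil {
		// The returned container instance reflects the pending agent update.
		log.Printf("agent update status: %s", out.ContainerInstance.AgentUpdateStatus)
	}
	return nil
}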
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Modifies the status of an Amazon ECS container instance. +// +// Once a container instance has reached an ACTIVE state, you can change the +// status of a container instance to DRAINING to manually remove an instance from +// a cluster, for example to perform system updates, update the Docker daemon, or +// scale down the cluster size. +// +// A container instance can't be changed to DRAINING until it has reached an ACTIVE +// status. If the instance is in any other status, an error will be received. +// +// When you set a container instance to DRAINING , Amazon ECS prevents new tasks +// from being scheduled for placement on the container instance and replacement +// service tasks are started on other container instances in the cluster if the +// resources are available. Service tasks on the container instance that are in the +// PENDING state are stopped immediately. +// +// Service tasks on the container instance that are in the RUNNING state are +// stopped and replaced according to the service's deployment configuration +// parameters, minimumHealthyPercent and maximumPercent . You can change the +// deployment configuration of your service using [UpdateService]. +// +// - If minimumHealthyPercent is below 100%, the scheduler can ignore +// desiredCount temporarily during task replacement. For example, desiredCount is +// four tasks, a minimum of 50% allows the scheduler to stop two existing tasks +// before starting two new tasks. If the minimum is 100%, the service scheduler +// can't remove existing tasks until the replacement tasks are considered healthy. +// Tasks for services that do not use a load balancer are considered healthy if +// they're in the RUNNING state. Tasks for services that use a load balancer are +// considered healthy if they're in the RUNNING state and are reported as healthy +// by the load balancer. +// +// - The maximumPercent parameter represents an upper limit on the number of +// running tasks during task replacement. You can use this to define the +// replacement batch size. For example, if desiredCount is four tasks, a maximum +// of 200% starts four new tasks before stopping the four tasks to be drained, +// provided that the cluster resources required to do this are available. If the +// maximum is 100%, then replacement tasks can't start until the draining tasks +// have stopped. +// +// Any PENDING or RUNNING tasks that do not belong to a service aren't affected. +// You must wait for them to finish or stop them manually. +// +// A container instance has completed draining when it has no more RUNNING tasks. +// You can verify this using [ListTasks]. +// +// When a container instance has been drained, you can set a container instance to +// ACTIVE status and once it has reached that status the Amazon ECS scheduler can +// begin scheduling tasks on the instance again. 
+// +// [UpdateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateService.html +// [ListTasks]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListTasks.html +func (c *Client) UpdateContainerInstancesState(ctx context.Context, params *UpdateContainerInstancesStateInput, optFns ...func(*Options)) (*UpdateContainerInstancesStateOutput, error) { + if params == nil { params = &UpdateContainerInstancesStateInput{} } + + result, metadata, err := c.invokeOperation(ctx, "UpdateContainerInstancesState", params, optFns, c.addOperationUpdateContainerInstancesStateMiddlewares) + if err != nil { return nil, err } + + out := result.(*UpdateContainerInstancesStateOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateContainerInstancesStateInput struct { + + // A list of up to 10 container instance IDs or full ARN entries. + // + // This member is required. + ContainerInstances []string + + // The container instance state to update the container instance with. The only + // valid values for this action are ACTIVE and DRAINING . A container instance can + // only be updated to DRAINING status once it has reached an ACTIVE state. If a + // container instance is in REGISTERING , DEREGISTERING , or REGISTRATION_FAILED + // state you can describe the container instance but can't update the container + // instance state. + // + // This member is required. + Status types.ContainerInstanceStatus + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // container instance to update. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string + + noSmithyDocumentSerde +} + +type UpdateContainerInstancesStateOutput struct { + + // The list of container instances. + ContainerInstances []types.ContainerInstance + + // Any failures associated with the call. + Failures []types.Failure + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateContainerInstancesStateMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateContainerInstancesState{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateContainerInstancesState{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateContainerInstancesState"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateContainerInstancesStateValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateContainerInstancesState(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateContainerInstancesState(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateContainerInstancesState", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_UpdateService.go b/aws-sdk-go-v2/service/ecs/api_op_UpdateService.go new file mode 100644 index 00000000000..8a8d5a7146d --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_UpdateService.go @@ -0,0 +1,461 @@ +// Code generated by smithy-go-codegen 
DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Modifies the parameters of a service. +// +// On March 21, 2024, a change was made to resolve the task definition revision +// before authorization. When a task definition revision is not specified, +// authorization will occur using the latest revision of a task definition. +// +// For services using the rolling update ( ECS ) you can update the desired count, +// deployment configuration, network configuration, load balancers, service +// registries, enable ECS managed tags option, propagate tags option, task +// placement constraints and strategies, and task definition. When you update any +// of these parameters, Amazon ECS starts new tasks with the new configuration. +// +// You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume +// when starting or running a task, or when creating or updating a service. For +// more infomation, see [Amazon EBS volumes]in the Amazon Elastic Container Service Developer Guide. +// You can update your volume configurations and trigger a new deployment. +// volumeConfigurations is only supported for REPLICA service and not DAEMON +// service. If you leave volumeConfigurations null , it doesn't trigger a new +// deployment. For more infomation on volumes, see [Amazon EBS volumes]in the Amazon Elastic Container +// Service Developer Guide. +// +// For services using the blue/green ( CODE_DEPLOY ) deployment controller, only +// the desired count, deployment configuration, health check grace period, task +// placement constraints and strategies, enable ECS managed tags option, and +// propagate tags can be updated using this API. If the network configuration, +// platform version, task definition, or load balancer need to be updated, create a +// new CodeDeploy deployment. For more information, see [CreateDeployment]in the CodeDeploy API +// Reference. +// +// For services using an external deployment controller, you can update only the +// desired count, task placement constraints and strategies, health check grace +// period, enable ECS managed tags option, and propagate tags option, using this +// API. If the launch type, load balancer, network configuration, platform version, +// or task definition need to be updated, create a new task set For more +// information, see [CreateTaskSet]. +// +// You can add to or subtract from the number of instantiations of a task +// definition in a service by specifying the cluster that the service is running in +// and a new desiredCount parameter. +// +// You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume +// when starting or running a task, or when creating or updating a service. For +// more infomation, see [Amazon EBS volumes]in the Amazon Elastic Container Service Developer Guide. +// +// If you have updated the container image of your application, you can create a +// new task definition with that image and deploy it to your service. The service +// scheduler uses the minimum healthy percent and maximum percent parameters (in +// the service's deployment configuration) to determine the deployment strategy. 
+// +// If your updated Docker image uses the same tag as what is in the existing task +// definition for your service (for example, my_image:latest ), you don't need to +// create a new revision of your task definition. You can update the service using +// the forceNewDeployment option. The new tasks launched by the deployment pull +// the current image/tag combination from your repository when they start. +// +// You can also update the deployment configuration of a service. When a +// deployment is triggered by updating the task definition of a service, the +// service scheduler uses the deployment configuration parameters, +// minimumHealthyPercent and maximumPercent , to determine the deployment strategy. +// +// - If minimumHealthyPercent is below 100%, the scheduler can ignore +// desiredCount temporarily during a deployment. For example, if desiredCount is +// four tasks, a minimum of 50% allows the scheduler to stop two existing tasks +// before starting two new tasks. Tasks for services that don't use a load balancer +// are considered healthy if they're in the RUNNING state. Tasks for services +// that use a load balancer are considered healthy if they're in the RUNNING +// state and are reported as healthy by the load balancer. +// +// - The maximumPercent parameter represents an upper limit on the number of +// running tasks during a deployment. You can use it to define the deployment batch +// size. For example, if desiredCount is four tasks, a maximum of 200% starts +// four new tasks before stopping the four older tasks (provided that the cluster +// resources required to do this are available). +// +// When [UpdateService] stops a task during a deployment, the equivalent of docker stop is issued +// to the containers running in the task. This results in a SIGTERM and a +// 30-second timeout. After this, SIGKILL is sent and the containers are forcibly +// stopped. If the container handles the SIGTERM gracefully and exits within 30 +// seconds from receiving it, no SIGKILL is sent. +// +// When the service scheduler launches new tasks, it determines task placement in +// your cluster with the following logic. +// +// - Determine which of the container instances in your cluster can support your +// service's task definition. For example, they have the required CPU, memory, +// ports, and container instance attributes. +// +// - By default, the service scheduler attempts to balance tasks across +// Availability Zones in this manner even though you can choose a different +// placement strategy. +// +// - Sort the valid container instances by the fewest number of running tasks +// for this service in the same Availability Zone as the instance. For example, if +// zone A has one running service task and zones B and C each have zero, valid +// container instances in either zone B or C are considered optimal for placement. +// +// - Place the new service task on a valid container instance in an optimal +// Availability Zone (based on the previous steps), favoring container instances +// with the fewest number of running tasks for this service. +// +// When the service scheduler stops running tasks, it attempts to maintain balance +// across the Availability Zones in your cluster using the following logic: +// +// - Sort the container instances by the largest number of running tasks for +// this service in the same Availability Zone as the instance. 
For example, if zone +// A has one running service task and zones B and C each have two, container +// instances in either zone B or C are considered optimal for termination. +// +// - Stop the task on a container instance in an optimal Availability Zone +// (based on the previous steps), favoring container instances with the largest +// number of running tasks for this service. +// +// You must have a service-linked role when you update any of the following +// service properties: +// +// - loadBalancers , +// +// - serviceRegistries +// +// For more information about the role see the CreateService request parameter [role]role +// . +// +// [role]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html#ECS-CreateService-request-role +// [CreateTaskSet]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateTaskSet.html +// [UpdateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateService.html +// [Amazon EBS volumes]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ebs-volumes.html#ebs-volume-types +// [CreateDeployment]: https://docs.aws.amazon.com/codedeploy/latest/APIReference/API_CreateDeployment.html +func (c *Client) UpdateService(ctx context.Context, params *UpdateServiceInput, optFns ...func(*Options)) (*UpdateServiceOutput, error) { + if params == nil { params = &UpdateServiceInput{} } + + result, metadata, err := c.invokeOperation(ctx, "UpdateService", params, optFns, c.addOperationUpdateServiceMiddlewares) + if err != nil { return nil, err } + + out := result.(*UpdateServiceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateServiceInput struct { + + // The name of the service to update. + // + // This member is required. + Service *string + + // The capacity provider strategy to update the service to use. + // + // if the service uses the default capacity provider strategy for the cluster, the + // service can be updated to use one or more capacity providers as opposed to the + // default capacity provider strategy. However, when a service is using a capacity + // provider strategy that's not the default capacity provider strategy, the service + // can't be updated to use the cluster's default capacity provider strategy. + // + // A capacity provider strategy consists of one or more capacity providers along + // with the base and weight to assign to them. A capacity provider must be + // associated with the cluster to be used in a capacity provider strategy. The [PutClusterCapacityProviders]API + // is used to associate a capacity provider with a cluster. Only capacity providers + // with an ACTIVE or UPDATING status can be used. + // + // If specifying a capacity provider that uses an Auto Scaling group, the capacity + // provider must already be created. New capacity providers can be created with the + // [CreateClusterCapacityProvider]API operation. + // + // To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT + // capacity providers. The Fargate capacity providers are available to all accounts + // and only need to be associated with a cluster to be used. + // + // The [PutClusterCapacityProviders]API operation is used to update the list of available capacity providers + // for a cluster after the cluster is created. 
+ // + // [PutClusterCapacityProviders]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutClusterCapacityProviders.html + // [CreateClusterCapacityProvider]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateClusterCapacityProvider.html + CapacityProviderStrategy []types.CapacityProviderStrategyItem + + // The short name or full Amazon Resource Name (ARN) of the cluster that your + // service runs on. If you do not specify a cluster, the default cluster is + // assumed. + Cluster *string + + // Optional deployment parameters that control how many tasks run during the + // deployment and the ordering of stopping and starting tasks. + DeploymentConfiguration *types.DeploymentConfiguration + + // The number of instantiations of the task to place and keep running in your + // service. + DesiredCount *int32 + + // Determines whether to turn on Amazon ECS managed tags for the tasks in the + // service. For more information, see [Tagging Your Amazon ECS Resources]in the Amazon Elastic Container Service + // Developer Guide. + // + // Only tasks launched after the update will reflect the update. To update the + // tags on all tasks, set forceNewDeployment to true , so that Amazon ECS starts + // new tasks with the updated tags. + // + // [Tagging Your Amazon ECS Resources]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html + EnableECSManagedTags *bool + + // If true , this enables execute command functionality on all task containers. + // + // If you do not want to override the value that was set when the service was + // created, you can set this to null when performing this action. + EnableExecuteCommand *bool + + // Determines whether to force a new deployment of the service. By default, + // deployments aren't forced. You can use this option to start a new deployment + // with no service definition changes. For example, you can update a service's + // tasks to use a newer Docker image with the same image/tag combination ( + // my_image:latest ) or to roll Fargate tasks onto a newer platform version. + ForceNewDeployment bool + + // The period of time, in seconds, that the Amazon ECS service scheduler ignores + // unhealthy Elastic Load Balancing target health checks after a task has first + // started. This is only valid if your service is configured to use a load + // balancer. If your service's tasks take a while to start and respond to Elastic + // Load Balancing health checks, you can specify a health check grace period of up + // to 2,147,483,647 seconds. During that time, the Amazon ECS service scheduler + // ignores the Elastic Load Balancing health check status. This grace period can + // prevent the ECS service scheduler from marking tasks as unhealthy and stopping + // them before they have time to come up. + HealthCheckGracePeriodSeconds *int32 + + // A list of Elastic Load Balancing load balancer objects. It contains the load + // balancer name, the container name, and the container port to access from the + // load balancer. The container name is as it appears in a container definition. + // + // When you add, update, or remove a load balancer configuration, Amazon ECS + // starts new tasks with the updated Elastic Load Balancing configuration, and then + // stops the old tasks when the new tasks are running. + // + // For services that use rolling updates, you can add, update, or remove Elastic + // Load Balancing target groups. 
You can update from a single target group to + // multiple target groups and from multiple target groups to a single target group. + // + // For services that use blue/green deployments, you can update Elastic Load + // Balancing target groups by using [CreateDeployment]through CodeDeploy. Note that multiple target + // groups are not supported for blue/green deployments. For more information see [Register multiple target groups with a service] + // in the Amazon Elastic Container Service Developer Guide. + // + // For services that use the external deployment controller, you can add, update, + // or remove load balancers by using [CreateTaskSet]. Note that multiple target groups are not + // supported for external deployments. For more information see [Register multiple target groups with a service]in the Amazon + // Elastic Container Service Developer Guide. + // + // You can remove existing loadBalancers by passing an empty list. + // + // [CreateTaskSet]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateTaskSet.html + // [Register multiple target groups with a service]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/register-multiple-targetgroups.html + // [CreateDeployment]: https://docs.aws.amazon.com/codedeploy/latest/APIReference/API_CreateDeployment.html + LoadBalancers []types.LoadBalancer + + // An object representing the network configuration for the service. + NetworkConfiguration *types.NetworkConfiguration + + // An array of task placement constraint objects to update the service to use. If + // no value is specified, the existing placement constraints for the service will + // remain unchanged. If this value is specified, it will override any existing + // placement constraints defined for the service. To remove all existing placement + // constraints, specify an empty array. + // + // You can specify a maximum of 10 constraints for each task. This limit includes + // constraints in the task definition and those specified at runtime. + PlacementConstraints []types.PlacementConstraint + + // The task placement strategy objects to update the service to use. If no value + // is specified, the existing placement strategy for the service will remain + // unchanged. If this value is specified, it will override the existing placement + // strategy defined for the service. To remove an existing placement strategy, + // specify an empty object. + // + // You can specify a maximum of five strategy rules for each service. + PlacementStrategy []types.PlacementStrategy + + // The platform version that your tasks in the service run on. A platform version + // is only specified for tasks using the Fargate launch type. If a platform version + // is not specified, the LATEST platform version is used. For more information, + // see [Fargate Platform Versions]in the Amazon Elastic Container Service Developer Guide. + // + // [Fargate Platform Versions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html + PlatformVersion *string + + // Determines whether to propagate the tags from the task definition or the + // service to the task. If no value is specified, the tags aren't propagated. + // + // Only tasks launched after the update will reflect the update. To update the + // tags on all tasks, set forceNewDeployment to true , so that Amazon ECS starts + // new tasks with the updated tags. 
+ PropagateTags types.PropagateTags + + // The configuration for this service to discover and connect to services, and be + // discovered by, and connected from, other services within a namespace. + // + // Tasks that run in a namespace can use short names to connect to services in the + // namespace. Tasks can connect to services across all of the clusters in the + // namespace. Tasks connect through a managed proxy container that collects logs + // and metrics for increased visibility. Only the tasks that Amazon ECS services + // create are supported with Service Connect. For more information, see [Service Connect]in the + // Amazon Elastic Container Service Developer Guide. + // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + ServiceConnectConfiguration *types.ServiceConnectConfiguration + + // The details for the service discovery registries to assign to this service. For + // more information, see [Service Discovery]. + // + // When you add, update, or remove the service registries configuration, Amazon + // ECS starts new tasks with the updated service registries configuration, and then + // stops the old tasks when the new tasks are running. + // + // You can remove existing serviceRegistries by passing an empty list. + // + // [Service Discovery]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html + ServiceRegistries []types.ServiceRegistry + + // The family and revision ( family:revision ) or full ARN of the task definition + // to run in your service. If a revision is not specified, the latest ACTIVE + // revision is used. If you modify the task definition with UpdateService , Amazon + // ECS spawns a task with the new version of the task definition and then stops an + // old task after the new version is running. + TaskDefinition *string + + // The details of the volume that was configuredAtLaunch . You can configure the + // size, volumeType, IOPS, throughput, snapshot and encryption in [ServiceManagedEBSVolumeConfiguration]. The name of + // the volume must match the name from the task definition. If set to null, no new + // deployment is triggered. Otherwise, if this configuration differs from the + // existing one, it triggers a new deployment. + // + // [ServiceManagedEBSVolumeConfiguration]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ServiceManagedEBSVolumeConfiguration.html + VolumeConfigurations []types.ServiceVolumeConfiguration + + noSmithyDocumentSerde +} + +type UpdateServiceOutput struct { + + // The full description of your service following the update call. + Service *types.Service + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateServiceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateService{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateService{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateService"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateServiceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateService(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateService(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateService", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_UpdateServicePrimaryTaskSet.go b/aws-sdk-go-v2/service/ecs/api_op_UpdateServicePrimaryTaskSet.go new file mode 100644 index 00000000000..1d2e86374c5 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_UpdateServicePrimaryTaskSet.go @@ -0,0 +1,169 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
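// Illustrative sketch: a rolling update that scales a service and forces a
// new deployment so tasks re-pull the current image tag, per the
// UpdateService notes above. The cluster/service names and desired count are
// placeholders; the client is assumed to come from ecs.NewFromConfig.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func rollService(ctx context.Context, client *ecs.Client) error {
	out, err := client.UpdateService(ctx, &ecs.UpdateServiceInput{
		Service:            aws.String("example-service"), // placeholder service name
		Cluster:            aws.String("example-cluster"), // placeholder cluster name
		DesiredCount:       aws.Int32(4),
		ForceNewDeployment: true, // start a new deployment without changing the task definition
	})
	if err != nil {
		return err
	}
	if out.Service != nil {
		log.Printf("service %s now has %d deployments", aws.ToString(out.Service.ServiceName), len(out.Service.Deployments))
	}
	return nil
}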
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Modifies which task set in a service is the primary task set. Any parameters +// that are updated on the primary task set in a service will transition to the +// service. This is used when a service uses the EXTERNAL deployment controller +// type. For more information, see [Amazon ECS Deployment Types]in the Amazon Elastic Container Service +// Developer Guide. +// +// [Amazon ECS Deployment Types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html +func (c *Client) UpdateServicePrimaryTaskSet(ctx context.Context, params *UpdateServicePrimaryTaskSetInput, optFns ...func(*Options)) (*UpdateServicePrimaryTaskSetOutput, error) { + if params == nil { params = &UpdateServicePrimaryTaskSetInput{} } + + result, metadata, err := c.invokeOperation(ctx, "UpdateServicePrimaryTaskSet", params, optFns, c.addOperationUpdateServicePrimaryTaskSetMiddlewares) + if err != nil { return nil, err } + + out := result.(*UpdateServicePrimaryTaskSetOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateServicePrimaryTaskSetInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // service that the task set exists in. + // + // This member is required. + Cluster *string + + // The short name or full Amazon Resource Name (ARN) of the task set to set as the + // primary task set in the deployment. + // + // This member is required. + PrimaryTaskSet *string + + // The short name or full Amazon Resource Name (ARN) of the service that the task + // set exists in. + // + // This member is required. + Service *string + + noSmithyDocumentSerde +} + +type UpdateServicePrimaryTaskSetOutput struct { + + // The details about the task set. + TaskSet *types.TaskSet + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateServicePrimaryTaskSetMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateServicePrimaryTaskSet{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateServicePrimaryTaskSet{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateServicePrimaryTaskSet"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateServicePrimaryTaskSetValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateServicePrimaryTaskSet(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateServicePrimaryTaskSet(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateServicePrimaryTaskSet", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskProtection.go b/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskProtection.go new file mode 100644 index 00000000000..d71a2f11599 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskProtection.go @@ -0,0 +1,211 @@ +// Code generated by 
smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Updates the protection status of a task. You can set protectionEnabled to true +// to protect your task from termination during scale-in events from [Service Autoscaling]or [deployments]. +// +// Task-protection, by default, expires after 2 hours at which point Amazon ECS +// clears the protectionEnabled property making the task eligible for termination +// by a subsequent scale-in event. +// +// You can specify a custom expiration period for task protection from 1 minute to +// up to 2,880 minutes (48 hours). To specify the custom expiration period, set the +// expiresInMinutes property. The expiresInMinutes property is always reset when +// you invoke this operation for a task that already has protectionEnabled set to +// true . You can keep extending the protection expiration period of a task by +// invoking this operation repeatedly. +// +// To learn more about Amazon ECS task protection, see [Task scale-in protection] in the Amazon Elastic +// Container Service Developer Guide . +// +// This operation is only supported for tasks belonging to an Amazon ECS service. +// Invoking this operation for a standalone task will result in an TASK_NOT_VALID +// failure. For more information, see [API failure reasons]. +// +// If you prefer to set task protection from within the container, we recommend +// using the [Task scale-in protection endpoint]. +// +// [deployments]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html +// [API failure reasons]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html +// [Task scale-in protection endpoint]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-scale-in-protection-endpoint.html +// [Task scale-in protection]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-scale-in-protection.html +// [Service Autoscaling]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-auto-scaling.html +func (c *Client) UpdateTaskProtection(ctx context.Context, params *UpdateTaskProtectionInput, optFns ...func(*Options)) (*UpdateTaskProtectionOutput, error) { + if params == nil { params = &UpdateTaskProtectionInput{} } + + result, metadata, err := c.invokeOperation(ctx, "UpdateTaskProtection", params, optFns, c.addOperationUpdateTaskProtectionMiddlewares) + if err != nil { return nil, err } + + out := result.(*UpdateTaskProtectionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateTaskProtectionInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // service that the task sets exist in. + // + // This member is required. + Cluster *string + + // Specify true to mark a task for protection and false to unset protection, + // making it eligible for termination. + // + // This member is required. + ProtectionEnabled bool + + // A list of up to 10 task IDs or full ARN entries. + // + // This member is required. + Tasks []string + + // If you set protectionEnabled to true , you can specify the duration for task + // protection in minutes. You can specify a value from 1 minute to up to 2,880 + // minutes (48 hours). 
During this time, your task will not be terminated by + // scale-in events from Service Auto Scaling or deployments. After this time period + // lapses, protectionEnabled will be reset to false . + // + // If you don’t specify the time, then the task is automatically protected for 120 + // minutes (2 hours). + ExpiresInMinutes *int32 + + noSmithyDocumentSerde +} + +type UpdateTaskProtectionOutput struct { + + // Any failures associated with the call. + Failures []types.Failure + + // A list of tasks with the following information. + // + // - taskArn : The task ARN. + // + // - protectionEnabled : The protection status of the task. If scale-in + // protection is turned on for a task, the value is true . Otherwise, it is false + // . + // + // - expirationDate : The epoch time when protection for the task will expire. + ProtectedTasks []types.ProtectedTask + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateTaskProtectionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateTaskProtection{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateTaskProtection{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateTaskProtection"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateTaskProtectionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateTaskProtection(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if 
err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateTaskProtection(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateTaskProtection", + } +} diff --git a/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskSet.go b/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskSet.go new file mode 100644 index 00000000000..00b04a7e91b --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskSet.go @@ -0,0 +1,172 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +// Modifies a task set. This is used when a service uses the EXTERNAL deployment +// controller type. For more information, see [Amazon ECS Deployment Types]in the Amazon Elastic Container +// Service Developer Guide. +// +// [Amazon ECS Deployment Types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html +func (c *Client) UpdateTaskSet(ctx context.Context, params *UpdateTaskSetInput, optFns ...func(*Options)) (*UpdateTaskSetOutput, error) { + if params == nil { params = &UpdateTaskSetInput{} } + + result, metadata, err := c.invokeOperation(ctx, "UpdateTaskSet", params, optFns, c.addOperationUpdateTaskSetMiddlewares) + if err != nil { return nil, err } + + out := result.(*UpdateTaskSetOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateTaskSetInput struct { + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts the + // service that the task set is found in. + // + // This member is required. + Cluster *string + + // A floating-point percentage of the desired number of tasks to place and keep + // running in the task set. + // + // This member is required. + Scale *types.Scale + + // The short name or full Amazon Resource Name (ARN) of the service that the task + // set is found in. + // + // This member is required. + Service *string + + // The short name or full Amazon Resource Name (ARN) of the task set to update. + // + // This member is required. + TaskSet *string + + noSmithyDocumentSerde +} + +type UpdateTaskSetOutput struct { + + // Details about the task set. + TaskSet *types.TaskSet + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateTaskSetMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateTaskSet{}, middleware.After) + if err != nil { return err } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateTaskSet{}, middleware.After) + if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateTaskSet"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateTaskSetValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateTaskSet(options.Region, ), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateTaskSet(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateTaskSet", + } +} diff --git a/aws-sdk-go-v2/service/ecs/auth.go b/aws-sdk-go-v2/service/ecs/auth.go new file mode 100644 index 00000000000..9358afdea4a --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/auth.go @@ -0,0 +1,317 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
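Another illustrative aside, not part of the generated patch: a sketch of calling the UpdateTaskProtection operation defined above, using only the input and output fields shown in this file; client construction mirrors the earlier sketch, and the cluster name and task ID are placeholders.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := ecs.NewFromConfig(cfg)

	out, err := client.UpdateTaskProtection(ctx, &ecs.UpdateTaskProtectionInput{
		Cluster:           aws.String("example-cluster"),                // placeholder
		Tasks:             []string{"0123456789abcdef0123456789abcdef"}, // placeholder task ID
		ProtectionEnabled: true,
		// Optional: extend protection beyond the 2-hour default, up to 2,880 minutes.
		ExpiresInMinutes: aws.Int32(60),
	})
	if err != nil {
		log.Fatalf("UpdateTaskProtection: %v", err)
	}
	for _, f := range out.Failures {
		// Failures carry per-task reasons, e.g. TASK_NOT_VALID for standalone tasks.
		log.Printf("task %s not protected: %s", aws.ToString(f.Arn), aws.ToString(f.Reason))
	}
	for _, t := range out.ProtectedTasks {
		log.Printf("protected %s until %v", aws.ToString(t.TaskArn), t.ExpirationDate)
	}
}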
+ + +package ecs + +import ( + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "fmt" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/smithy-go/tracing" +) + +func bindAuthParamsRegion( _ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +type setLegacyContextSigningOptionsMiddleware struct { + +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. +Operation string + +// The region in which the operation is being invoked. +Region string +} + +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthParamsRegion(ctx, params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. 
+type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ + +} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "ecs") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), +}, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx 
context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func (o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %w", err) + } + + ctx = setIdentity(ctx, identity) + + span.End() + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { + options Options +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("sign request: %w", err) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/aws-sdk-go-v2/service/ecs/deserializers.go b/aws-sdk-go-v2/service/ecs/deserializers.go new file mode 100644 index 00000000000..21bdd44adbd --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/deserializers.go @@ -0,0 +1,20256 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
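One more illustrative aside, not part of the generated patch: a sketch of a caller-supplied AuthSchemeResolver that mirrors serviceAuthOptions above but pins the SigV4 signing region instead of following params.Region. It assumes the AuthSchemeResolver field is exposed on ecs.Options (as in upstream aws-sdk-go-v2's options.go, which is not shown here); the resolver type name and region value are placeholders for the example.

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	smithy "github.com/aws/smithy-go"
	smithyauth "github.com/aws/smithy-go/auth"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// pinnedRegionResolver is a hypothetical resolver: it always offers SigV4 for one
// fixed region, whereas the generated default resolver uses params.Region.
type pinnedRegionResolver struct {
	region string
}

func (r *pinnedRegionResolver) ResolveAuthSchemes(ctx context.Context, params *ecs.AuthResolverParameters) ([]*smithyauth.Option, error) {
	var props smithy.Properties
	smithyhttp.SetSigV4SigningName(&props, "ecs")
	smithyhttp.SetSigV4SigningRegion(&props, r.region)
	return []*smithyauth.Option{
		{SchemeID: smithyauth.SchemeIDSigV4, SignerProperties: props},
	}, nil
}

// newClientWithPinnedSigning wires the custom resolver into the client options.
func newClientWithPinnedSigning(cfg aws.Config) *ecs.Client {
	return ecs.NewFromConfig(cfg, func(o *ecs.Options) {
		o.AuthSchemeResolver = &pinnedRegionResolver{region: "us-east-1"}
	})
}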
+ + +package ecs + +import ( + "bytes" + "context" + "fmt" + "io" + "encoding/json" + "math" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + smithy "github.com/aws/smithy-go" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithyio "github.com/aws/smithy-go/io" + smithytime "github.com/aws/smithy-go/time" + "strings" + "time" + "github.com/aws/smithy-go/tracing" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + +type awsAwsjson11_deserializeOpCreateCapacityProvider struct { +} + +func (*awsAwsjson11_deserializeOpCreateCapacityProvider) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateCapacityProvider) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateCapacityProvider(response, &metadata) + } + output := &CreateCapacityProviderOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateCapacityProviderOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateCapacityProvider(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode 
response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UpdateInProgressException", errorCode): + return awsAwsjson11_deserializeErrorUpdateInProgressException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateCluster struct { +} + +func (*awsAwsjson11_deserializeOpCreateCluster) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateCluster(response, &metadata) + } + output := &CreateClusterOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateClusterOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + 
headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("NamespaceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNamespaceNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateService struct { +} + +func (*awsAwsjson11_deserializeOpCreateService) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateService) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateService(response, &metadata) + } + output := &CreateServiceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateServiceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateService(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer 
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("NamespaceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNamespaceNotFoundException(response, errorBody) + + case strings.EqualFold("PlatformTaskDefinitionIncompatibilityException", errorCode): + return awsAwsjson11_deserializeErrorPlatformTaskDefinitionIncompatibilityException(response, errorBody) + + case strings.EqualFold("PlatformUnknownException", errorCode): + return awsAwsjson11_deserializeErrorPlatformUnknownException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedFeatureException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedFeatureException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateTaskSet struct { +} + +func (*awsAwsjson11_deserializeOpCreateTaskSet) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateTaskSet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, 
awsAwsjson11_deserializeOpErrorCreateTaskSet(response, &metadata) + } + output := &CreateTaskSetOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateTaskSetOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateTaskSet(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("NamespaceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNamespaceNotFoundException(response, errorBody) + + case strings.EqualFold("PlatformTaskDefinitionIncompatibilityException", errorCode): + return awsAwsjson11_deserializeErrorPlatformTaskDefinitionIncompatibilityException(response, errorBody) + + case strings.EqualFold("PlatformUnknownException", errorCode): + return awsAwsjson11_deserializeErrorPlatformUnknownException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ServiceNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotActiveException(response, errorBody) + + case 
strings.EqualFold("ServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedFeatureException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedFeatureException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteAccountSetting struct { +} + +func (*awsAwsjson11_deserializeOpDeleteAccountSetting) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteAccountSetting) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteAccountSetting(response, &metadata) + } + output := &DeleteAccountSettingOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteAccountSettingOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteAccountSetting(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if 
len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteAttributes struct { +} + +func (*awsAwsjson11_deserializeOpDeleteAttributes) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteAttributes(response, &metadata) + } + output := &DeleteAttributesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteAttributesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + 
errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("TargetNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorTargetNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteCapacityProvider struct { +} + +func (*awsAwsjson11_deserializeOpDeleteCapacityProvider) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteCapacityProvider) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteCapacityProvider(response, &metadata) + } + output := &DeleteCapacityProviderOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteCapacityProviderOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteCapacityProvider(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if 
err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteCluster struct { +} + +func (*awsAwsjson11_deserializeOpDeleteCluster) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteCluster(response, &metadata) + } + output := &DeleteClusterOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteClusterOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterContainsContainerInstancesException", errorCode): + return awsAwsjson11_deserializeErrorClusterContainsContainerInstancesException(response, errorBody) + + case strings.EqualFold("ClusterContainsServicesException", errorCode): + return awsAwsjson11_deserializeErrorClusterContainsServicesException(response, errorBody) + + case strings.EqualFold("ClusterContainsTasksException", errorCode): + return awsAwsjson11_deserializeErrorClusterContainsTasksException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UpdateInProgressException", errorCode): + return awsAwsjson11_deserializeErrorUpdateInProgressException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteService struct { +} + +func (*awsAwsjson11_deserializeOpDeleteService) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteService) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteService(response, &metadata) + } + output := &DeleteServiceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, 
metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteServiceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteService(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteTaskDefinitions struct { +} + +func (*awsAwsjson11_deserializeOpDeleteTaskDefinitions) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteTaskDefinitions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteTaskDefinitions(response, &metadata) + } + output := &DeleteTaskDefinitionsOutput{} + out.Result = output + + 
var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteTaskDefinitionsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteTaskDefinitions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteTaskSet struct { +} + +func (*awsAwsjson11_deserializeOpDeleteTaskSet) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteTaskSet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteTaskSet(response, &metadata) + } + output := &DeleteTaskSetOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteTaskSetOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteTaskSet(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ServiceNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotActiveException(response, errorBody) + + case strings.EqualFold("ServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotFoundException(response, errorBody) + + case strings.EqualFold("TaskSetNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorTaskSetNotFoundException(response, errorBody) + + case 
strings.EqualFold("UnsupportedFeatureException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedFeatureException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeregisterContainerInstance struct { +} + +func (*awsAwsjson11_deserializeOpDeregisterContainerInstance) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeregisterContainerInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeregisterContainerInstance(response, &metadata) + } + output := &DeregisterContainerInstanceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeregisterContainerInstanceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeregisterContainerInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case 
strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeregisterTaskDefinition struct { +} + +func (*awsAwsjson11_deserializeOpDeregisterTaskDefinition) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeregisterTaskDefinition) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeregisterTaskDefinition(response, &metadata) + } + output := &DeregisterTaskDefinitionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeregisterTaskDefinitionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeregisterTaskDefinition(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeCapacityProviders struct { +} + +func (*awsAwsjson11_deserializeOpDescribeCapacityProviders) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeCapacityProviders) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeCapacityProviders(response, &metadata) + } + output := &DescribeCapacityProvidersOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeCapacityProvidersOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeCapacityProviders(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeClusters struct { +} + +func (*awsAwsjson11_deserializeOpDescribeClusters) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeClusters) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeClusters(response, &metadata) + } + output := &DescribeClustersOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeClustersOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeClusters(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage 
:= errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeContainerInstances struct { +} + +func (*awsAwsjson11_deserializeOpDescribeContainerInstances) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeContainerInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeContainerInstances(response, &metadata) + } + output := &DescribeContainerInstancesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeContainerInstancesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeContainerInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, 
response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeServices struct { +} + +func (*awsAwsjson11_deserializeOpDescribeServices) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeServices) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeServices(response, &metadata) + } + output := &DescribeServicesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeServicesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response 
body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeServices(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeTaskDefinition struct { +} + +func (*awsAwsjson11_deserializeOpDescribeTaskDefinition) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeTaskDefinition) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeTaskDefinition(response, &metadata) + } + output := &DescribeTaskDefinitionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode 
response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeTaskDefinitionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeTaskDefinition(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeTasks struct { +} + +func (*awsAwsjson11_deserializeOpDescribeTasks) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeTasks(response, &metadata) + } + output := &DescribeTasksOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && 
err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeTasksOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeTaskSets struct { +} + +func (*awsAwsjson11_deserializeOpDescribeTaskSets) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeTaskSets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeTaskSets(response, &metadata) + } + output := 
&DescribeTaskSetsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeTaskSetsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeTaskSets(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ServiceNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotActiveException(response, errorBody) + + case strings.EqualFold("ServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedFeatureException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedFeatureException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDiscoverPollEndpoint struct { +} + +func (*awsAwsjson11_deserializeOpDiscoverPollEndpoint) ID() string { + 
return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDiscoverPollEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDiscoverPollEndpoint(response, &metadata) + } + output := &DiscoverPollEndpointOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDiscoverPollEndpointOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDiscoverPollEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpExecuteCommand struct { +} + +func 
(*awsAwsjson11_deserializeOpExecuteCommand) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpExecuteCommand) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorExecuteCommand(response, &metadata) + } + output := &ExecuteCommandOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentExecuteCommandOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorExecuteCommand(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case 
strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("TargetNotConnectedException", errorCode): + return awsAwsjson11_deserializeErrorTargetNotConnectedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetTaskProtection struct { +} + +func (*awsAwsjson11_deserializeOpGetTaskProtection) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetTaskProtection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetTaskProtection(response, &metadata) + } + output := &GetTaskProtectionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetTaskProtectionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetTaskProtection(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := 
resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedFeatureException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedFeatureException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListAccountSettings struct { +} + +func (*awsAwsjson11_deserializeOpListAccountSettings) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListAccountSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListAccountSettings(response, &metadata) + } + output := &ListAccountSettingsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListAccountSettingsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListAccountSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + 
return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListAttributes struct { +} + +func (*awsAwsjson11_deserializeOpListAttributes) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListAttributes(response, &metadata) + } + output := &ListAttributesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListAttributesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListAttributes(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListClusters struct { +} + +func (*awsAwsjson11_deserializeOpListClusters) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListClusters) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListClusters(response, &metadata) + } + output := &ListClustersOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListClustersOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func 
awsAwsjson11_deserializeOpErrorListClusters(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListContainerInstances struct { +} + +func (*awsAwsjson11_deserializeOpListContainerInstances) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListContainerInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListContainerInstances(response, &metadata) + } + output := &ListContainerInstancesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListContainerInstancesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListContainerInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListServices struct { +} + +func (*awsAwsjson11_deserializeOpListServices) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListServices) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListServices(response, &metadata) + } + output := &ListServicesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListServicesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListServices(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListServicesByNamespace struct { +} + +func (*awsAwsjson11_deserializeOpListServicesByNamespace) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListServicesByNamespace) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListServicesByNamespace(response, &metadata) + } + output := &ListServicesByNamespaceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := 
smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListServicesByNamespaceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListServicesByNamespace(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("NamespaceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNamespaceNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListTagsForResource struct { +} + +func (*awsAwsjson11_deserializeOpListTagsForResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListTagsForResource(response, &metadata) + } + output := &ListTagsForResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListTaskDefinitionFamilies struct { +} + +func (*awsAwsjson11_deserializeOpListTaskDefinitionFamilies) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListTaskDefinitionFamilies) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = 
next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListTaskDefinitionFamilies(response, &metadata) + } + output := &ListTaskDefinitionFamiliesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListTaskDefinitionFamiliesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListTaskDefinitionFamilies(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListTaskDefinitions struct { +} + +func (*awsAwsjson11_deserializeOpListTaskDefinitions) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListTaskDefinitions) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListTaskDefinitions(response, &metadata) + } + output := &ListTaskDefinitionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListTaskDefinitionsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListTaskDefinitions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListTasks struct { +} 
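The operation deserializers and per-operation error functions in this file are what turn a non-2xx ECS response into the typed errors declared in service/ecs/types: the error code is resolved from the X-Amzn-ErrorType header or the JSON body, sanitized, matched case-insensitively, and anything unrecognized falls back to a smithy.GenericAPIError. A minimal caller-side sketch of how those errors surface through the generated client, assuming the upstream github.com/aws/aws-sdk-go-v2 module paths (the vendored paths created by this patch may differ) and a hypothetical cluster name:

package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
	"github.com/aws/smithy-go"
)

func main() {
	ctx := context.Background()

	// Load region and credentials from the default chain.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := ecs.NewFromConfig(cfg)

	// ListTasks against a cluster that may not exist ("example-cluster" is illustrative).
	_, err = client.ListTasks(ctx, &ecs.ListTasksInput{
		Cluster: aws.String("example-cluster"),
	})
	if err != nil {
		// Typed error produced by the ListTasks error deserializer above.
		var notFound *types.ClusterNotFoundException
		if errors.As(err, &notFound) {
			log.Printf("cluster not found: %s", aws.ToString(notFound.Message))
			return
		}
		// Everything else still satisfies smithy.APIError, including the
		// GenericAPIError fallback returned for unrecognized error codes.
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) {
			log.Printf("ECS call failed: %s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
			return
		}
		log.Fatal(err)
	}
}

This is a sketch under the stated path assumptions, not part of the generated patch; it only illustrates what the deserialization machinery produces for a consumer of the package.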
+ +func (*awsAwsjson11_deserializeOpListTasks) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListTasks(response, &metadata) + } + output := &ListTasksOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListTasksOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return 
awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutAccountSetting struct { +} + +func (*awsAwsjson11_deserializeOpPutAccountSetting) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutAccountSetting) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutAccountSetting(response, &metadata) + } + output := &PutAccountSettingOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutAccountSettingOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutAccountSetting(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case 
strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutAccountSettingDefault struct { +} + +func (*awsAwsjson11_deserializeOpPutAccountSettingDefault) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutAccountSettingDefault) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutAccountSettingDefault(response, &metadata) + } + output := &PutAccountSettingDefaultOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutAccountSettingDefaultOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutAccountSettingDefault(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, 
io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutAttributes struct { +} + +func (*awsAwsjson11_deserializeOpPutAttributes) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutAttributes(response, &metadata) + } + output := &PutAttributesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutAttributesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + 
Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AttributeLimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorAttributeLimitExceededException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("TargetNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorTargetNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutClusterCapacityProviders struct { +} + +func (*awsAwsjson11_deserializeOpPutClusterCapacityProviders) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutClusterCapacityProviders) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutClusterCapacityProviders(response, &metadata) + } + output := &PutClusterCapacityProvidersOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutClusterCapacityProvidersOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutClusterCapacityProviders(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + 
errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson11_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UpdateInProgressException", errorCode): + return awsAwsjson11_deserializeErrorUpdateInProgressException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpRegisterContainerInstance struct { +} + +func (*awsAwsjson11_deserializeOpRegisterContainerInstance) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpRegisterContainerInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorRegisterContainerInstance(response, &metadata) + } + output := &RegisterContainerInstanceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentRegisterContainerInstanceOutput(&output, shape) + if err != nil { + var snapshot 
bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorRegisterContainerInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpRegisterTaskDefinition struct { +} + +func (*awsAwsjson11_deserializeOpRegisterTaskDefinition) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpRegisterTaskDefinition) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorRegisterTaskDefinition(response, &metadata) + } + output := &RegisterTaskDefinitionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, 
%w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentRegisterTaskDefinitionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorRegisterTaskDefinition(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpRunTask struct { +} + +func (*awsAwsjson11_deserializeOpRunTask) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpRunTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorRunTask(response, &metadata) + } + output := &RunTaskOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentRunTaskOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorRunTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("BlockedException", errorCode): + return awsAwsjson11_deserializeErrorBlockedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsAwsjson11_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("PlatformTaskDefinitionIncompatibilityException", errorCode): + return awsAwsjson11_deserializeErrorPlatformTaskDefinitionIncompatibilityException(response, errorBody) + + case strings.EqualFold("PlatformUnknownException", errorCode): + return awsAwsjson11_deserializeErrorPlatformUnknownException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedFeatureException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedFeatureException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStartTask struct { +} + +func (*awsAwsjson11_deserializeOpStartTask) ID() string { + return "OperationDeserializer" +} + +func (m 
*awsAwsjson11_deserializeOpStartTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStartTask(response, &metadata) + } + output := &StartTaskOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentStartTaskOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStartTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case 
strings.EqualFold("UnsupportedFeatureException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedFeatureException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStopTask struct { +} + +func (*awsAwsjson11_deserializeOpStopTask) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStopTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStopTask(response, &metadata) + } + output := &StopTaskOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentStopTaskOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStopTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case 
strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpSubmitAttachmentStateChanges struct { +} + +func (*awsAwsjson11_deserializeOpSubmitAttachmentStateChanges) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpSubmitAttachmentStateChanges) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorSubmitAttachmentStateChanges(response, &metadata) + } + output := &SubmitAttachmentStateChangesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentSubmitAttachmentStateChangesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorSubmitAttachmentStateChanges(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + 
return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpSubmitContainerStateChange struct { +} + +func (*awsAwsjson11_deserializeOpSubmitContainerStateChange) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpSubmitContainerStateChange) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorSubmitContainerStateChange(response, &metadata) + } + output := &SubmitContainerStateChangeOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentSubmitContainerStateChangeOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorSubmitContainerStateChange(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) 
+ + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpSubmitTaskStateChange struct { +} + +func (*awsAwsjson11_deserializeOpSubmitTaskStateChange) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpSubmitTaskStateChange) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorSubmitTaskStateChange(response, &metadata) + } + output := &SubmitTaskStateChangeOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentSubmitTaskStateChangeOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorSubmitTaskStateChange(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + 
errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpTagResource struct { +} + +func (*awsAwsjson11_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentTagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) 
error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUntagResource struct { +} + +func (*awsAwsjson11_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = 
awsAwsjson11_deserializeOpDocumentUntagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateCapacityProvider struct { +} + +func (*awsAwsjson11_deserializeOpUpdateCapacityProvider) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateCapacityProvider) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateCapacityProvider(response, &metadata) + } + output := &UpdateCapacityProviderOutput{} + out.Result = output + + var buff [1024]byte + 
ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateCapacityProviderOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateCapacityProvider(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateCluster struct { +} + +func (*awsAwsjson11_deserializeOpUpdateCluster) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, 
awsAwsjson11_deserializeOpErrorUpdateCluster(response, &metadata) + } + output := &UpdateClusterOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateClusterOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("NamespaceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNamespaceNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateClusterSettings struct { +} + +func (*awsAwsjson11_deserializeOpUpdateClusterSettings) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateClusterSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, 
metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateClusterSettings(response, &metadata) + } + output := &UpdateClusterSettingsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateClusterSettingsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateClusterSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateContainerAgent struct { +} + +func (*awsAwsjson11_deserializeOpUpdateContainerAgent) ID() string { + return "OperationDeserializer" 
+} + +func (m *awsAwsjson11_deserializeOpUpdateContainerAgent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateContainerAgent(response, &metadata) + } + output := &UpdateContainerAgentOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateContainerAgentOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateContainerAgent(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("MissingVersionException", errorCode): + return 
awsAwsjson11_deserializeErrorMissingVersionException(response, errorBody) + + case strings.EqualFold("NoUpdateAvailableException", errorCode): + return awsAwsjson11_deserializeErrorNoUpdateAvailableException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UpdateInProgressException", errorCode): + return awsAwsjson11_deserializeErrorUpdateInProgressException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateContainerInstancesState struct { +} + +func (*awsAwsjson11_deserializeOpUpdateContainerInstancesState) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateContainerInstancesState) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateContainerInstancesState(response, &metadata) + } + output := &UpdateContainerInstancesStateOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateContainerInstancesStateOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateContainerInstancesState(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateService struct { +} + +func (*awsAwsjson11_deserializeOpUpdateService) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateService) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateService(response, &metadata) + } + output := &UpdateServiceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateServiceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateService(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := 
smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("NamespaceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNamespaceNotFoundException(response, errorBody) + + case strings.EqualFold("PlatformTaskDefinitionIncompatibilityException", errorCode): + return awsAwsjson11_deserializeErrorPlatformTaskDefinitionIncompatibilityException(response, errorBody) + + case strings.EqualFold("PlatformUnknownException", errorCode): + return awsAwsjson11_deserializeErrorPlatformUnknownException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ServiceNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotActiveException(response, errorBody) + + case strings.EqualFold("ServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedFeatureException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedFeatureException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateServicePrimaryTaskSet struct { +} + +func (*awsAwsjson11_deserializeOpUpdateServicePrimaryTaskSet) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateServicePrimaryTaskSet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateServicePrimaryTaskSet(response, &metadata) + } + 
output := &UpdateServicePrimaryTaskSetOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateServicePrimaryTaskSetOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateServicePrimaryTaskSet(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ServiceNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotActiveException(response, errorBody) + + case strings.EqualFold("ServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotFoundException(response, errorBody) + + case strings.EqualFold("TaskSetNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorTaskSetNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedFeatureException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedFeatureException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + 
Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateTaskProtection struct { +} + +func (*awsAwsjson11_deserializeOpUpdateTaskProtection) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateTaskProtection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateTaskProtection(response, &metadata) + } + output := &UpdateTaskProtectionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateTaskProtectionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateTaskProtection(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case 
strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedFeatureException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedFeatureException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateTaskSet struct { +} + +func (*awsAwsjson11_deserializeOpUpdateTaskSet) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateTaskSet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateTaskSet(response, &metadata) + } + output := &UpdateTaskSetOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateTaskSetOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateTaskSet(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != 
nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("ClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ServiceNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotActiveException(response, errorBody) + + case strings.EqualFold("ServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorServiceNotFoundException(response, errorBody) + + case strings.EqualFold("TaskSetNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorTaskSetNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedFeatureException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedFeatureException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsjson11_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.AccessDeniedException{} + err := awsAwsjson11_deserializeDocumentAccessDeniedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorAttributeLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := 
&types.AttributeLimitExceededException{} + err := awsAwsjson11_deserializeDocumentAttributeLimitExceededException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorBlockedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.BlockedException{} + err := awsAwsjson11_deserializeDocumentBlockedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ClientException{} + err := awsAwsjson11_deserializeDocumentClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorClusterContainsContainerInstancesException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ClusterContainsContainerInstancesException{} + err := awsAwsjson11_deserializeDocumentClusterContainsContainerInstancesException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func 
awsAwsjson11_deserializeErrorClusterContainsServicesException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ClusterContainsServicesException{} + err := awsAwsjson11_deserializeDocumentClusterContainsServicesException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorClusterContainsTasksException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ClusterContainsTasksException{} + err := awsAwsjson11_deserializeDocumentClusterContainsTasksException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorClusterNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ClusterNotFoundException{} + err := awsAwsjson11_deserializeDocumentClusterNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ConflictException{} + err := awsAwsjson11_deserializeDocumentConflictException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidParameterException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidParameterException{} + err := awsAwsjson11_deserializeDocumentInvalidParameterException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.LimitExceededException{} + err := awsAwsjson11_deserializeDocumentLimitExceededException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorMissingVersionException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.MissingVersionException{} + err := awsAwsjson11_deserializeDocumentMissingVersionException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + 
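+ // Rewind the error body so it can be re-read downstream, then return the
+ // fully populated modeled exception as the operation error.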
+ errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorNamespaceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.NamespaceNotFoundException{} + err := awsAwsjson11_deserializeDocumentNamespaceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorNoUpdateAvailableException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.NoUpdateAvailableException{} + err := awsAwsjson11_deserializeDocumentNoUpdateAvailableException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorPlatformTaskDefinitionIncompatibilityException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.PlatformTaskDefinitionIncompatibilityException{} + err := awsAwsjson11_deserializeDocumentPlatformTaskDefinitionIncompatibilityException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorPlatformUnknownException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := 
decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.PlatformUnknownException{} + err := awsAwsjson11_deserializeDocumentPlatformUnknownException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorResourceInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ResourceInUseException{} + err := awsAwsjson11_deserializeDocumentResourceInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ResourceNotFoundException{} + err := awsAwsjson11_deserializeDocumentResourceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ServerException{} + err := awsAwsjson11_deserializeDocumentServerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + 
Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorServiceNotActiveException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ServiceNotActiveException{} + err := awsAwsjson11_deserializeDocumentServiceNotActiveException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorServiceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ServiceNotFoundException{} + err := awsAwsjson11_deserializeDocumentServiceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorTargetNotConnectedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.TargetNotConnectedException{} + err := awsAwsjson11_deserializeDocumentTargetNotConnectedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorTargetNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var 
shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.TargetNotFoundException{} + err := awsAwsjson11_deserializeDocumentTargetNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorTaskSetNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.TaskSetNotFoundException{} + err := awsAwsjson11_deserializeDocumentTaskSetNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorUnsupportedFeatureException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.UnsupportedFeatureException{} + err := awsAwsjson11_deserializeDocumentUnsupportedFeatureException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorUpdateInProgressException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.UpdateInProgressException{} + err := awsAwsjson11_deserializeDocumentUpdateInProgressException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + 
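+ // Copy whatever the ring buffer captured so the DeserializationError below
+ // carries a snapshot of the payload that failed to deserialize.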
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError { + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccessDeniedException + if *v == nil { + sv = &types.AccessDeniedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAttachment(v **types.Attachment, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Attachment + if *v == nil { + sv = &types.Attachment{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "details": + if err := awsAwsjson11_deserializeDocumentAttachmentDetails(&sv.Details, value); err != nil { + return err + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Type = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAttachmentDetails(v *[]types.KeyValuePair, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.KeyValuePair + if *v == nil { + cv = []types.KeyValuePair{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.KeyValuePair + destAddr := &col + if err := awsAwsjson11_deserializeDocumentKeyValuePair(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentAttachments(v *[]types.Attachment, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Attachment + if *v == nil { + cv = []types.Attachment{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Attachment + destAddr := &col + if err := awsAwsjson11_deserializeDocumentAttachment(&destAddr, value); err 
!= nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentAttribute(v **types.Attribute, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Attribute + if *v == nil { + sv = &types.Attribute{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "targetId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TargetId = ptr.String(jtv) + } + + case "targetType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TargetType to be of type string, got %T instead", value) + } + sv.TargetType = types.TargetType(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAttributeLimitExceededException(v **types.AttributeLimitExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AttributeLimitExceededException + if *v == nil { + sv = &types.AttributeLimitExceededException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAttributes(v *[]types.Attribute, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Attribute + if *v == nil { + cv = []types.Attribute{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Attribute + destAddr := &col + if err := awsAwsjson11_deserializeDocumentAttribute(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentAutoScalingGroupProvider(v **types.AutoScalingGroupProvider, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AutoScalingGroupProvider + if *v == nil { + sv = &types.AutoScalingGroupProvider{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "autoScalingGroupArn": + if value != nil { + jtv, ok := value.(string) 
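+ // Each member is type-asserted before assignment; a mismatch is reported as
+ // a descriptive error rather than being silently coerced.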
+ if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AutoScalingGroupArn = ptr.String(jtv) + } + + case "managedDraining": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ManagedDraining to be of type string, got %T instead", value) + } + sv.ManagedDraining = types.ManagedDraining(jtv) + } + + case "managedScaling": + if err := awsAwsjson11_deserializeDocumentManagedScaling(&sv.ManagedScaling, value); err != nil { + return err + } + + case "managedTerminationProtection": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ManagedTerminationProtection to be of type string, got %T instead", value) + } + sv.ManagedTerminationProtection = types.ManagedTerminationProtection(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAwsVpcConfiguration(v **types.AwsVpcConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AwsVpcConfiguration + if *v == nil { + sv = &types.AwsVpcConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "assignPublicIp": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AssignPublicIp to be of type string, got %T instead", value) + } + sv.AssignPublicIp = types.AssignPublicIp(jtv) + } + + case "securityGroups": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.SecurityGroups, value); err != nil { + return err + } + + case "subnets": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.Subnets, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentBlockedException(v **types.BlockedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BlockedException + if *v == nil { + sv = &types.BlockedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCapacityProvider(v **types.CapacityProvider, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CapacityProvider + if *v == nil { + sv = &types.CapacityProvider{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "autoScalingGroupProvider": + if err := awsAwsjson11_deserializeDocumentAutoScalingGroupProvider(&sv.AutoScalingGroupProvider, value); err != nil { + return err + } + + case "capacityProviderArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got 
%T instead", value) + } + sv.CapacityProviderArn = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CapacityProviderStatus to be of type string, got %T instead", value) + } + sv.Status = types.CapacityProviderStatus(jtv) + } + + case "tags": + if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + case "updateStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CapacityProviderUpdateStatus to be of type string, got %T instead", value) + } + sv.UpdateStatus = types.CapacityProviderUpdateStatus(jtv) + } + + case "updateStatusReason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.UpdateStatusReason = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCapacityProviders(v *[]types.CapacityProvider, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.CapacityProvider + if *v == nil { + cv = []types.CapacityProvider{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.CapacityProvider + destAddr := &col + if err := awsAwsjson11_deserializeDocumentCapacityProvider(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentCapacityProviderStrategy(v *[]types.CapacityProviderStrategyItem, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.CapacityProviderStrategyItem + if *v == nil { + cv = []types.CapacityProviderStrategyItem{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.CapacityProviderStrategyItem + destAddr := &col + if err := awsAwsjson11_deserializeDocumentCapacityProviderStrategyItem(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentCapacityProviderStrategyItem(v **types.CapacityProviderStrategyItem, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CapacityProviderStrategyItem + if *v == nil { + sv = &types.CapacityProviderStrategyItem{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "base": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected CapacityProviderStrategyItemBase to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Base = int32(i64) + } + + case "capacityProvider": + if value != nil { + jtv, ok := value.(string) + if 
!ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.CapacityProvider = ptr.String(jtv) + } + + case "weight": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected CapacityProviderStrategyItemWeight to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Weight = int32(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClientException(v **types.ClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClientException + if *v == nil { + sv = &types.ClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCluster(v **types.Cluster, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Cluster + if *v == nil { + sv = &types.Cluster{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "activeServicesCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.ActiveServicesCount = int32(i64) + } + + case "attachments": + if err := awsAwsjson11_deserializeDocumentAttachments(&sv.Attachments, value); err != nil { + return err + } + + case "attachmentsStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AttachmentsStatus = ptr.String(jtv) + } + + case "capacityProviders": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.CapacityProviders, value); err != nil { + return err + } + + case "clusterArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ClusterArn = ptr.String(jtv) + } + + case "clusterName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ClusterName = ptr.String(jtv) + } + + case "configuration": + if err := awsAwsjson11_deserializeDocumentClusterConfiguration(&sv.Configuration, value); err != nil { + return err + } + + case "defaultCapacityProviderStrategy": + if err := awsAwsjson11_deserializeDocumentCapacityProviderStrategy(&sv.DefaultCapacityProviderStrategy, value); err != nil { + return err + } + + case "pendingTasksCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.PendingTasksCount = int32(i64) + } + + case 
"registeredContainerInstancesCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.RegisteredContainerInstancesCount = int32(i64) + } + + case "runningTasksCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.RunningTasksCount = int32(i64) + } + + case "serviceConnectDefaults": + if err := awsAwsjson11_deserializeDocumentClusterServiceConnectDefaults(&sv.ServiceConnectDefaults, value); err != nil { + return err + } + + case "settings": + if err := awsAwsjson11_deserializeDocumentClusterSettings(&sv.Settings, value); err != nil { + return err + } + + case "statistics": + if err := awsAwsjson11_deserializeDocumentStatistics(&sv.Statistics, value); err != nil { + return err + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + case "tags": + if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterConfiguration(v **types.ClusterConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterConfiguration + if *v == nil { + sv = &types.ClusterConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "executeCommandConfiguration": + if err := awsAwsjson11_deserializeDocumentExecuteCommandConfiguration(&sv.ExecuteCommandConfiguration, value); err != nil { + return err + } + + case "managedStorageConfiguration": + if err := awsAwsjson11_deserializeDocumentManagedStorageConfiguration(&sv.ManagedStorageConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterContainsContainerInstancesException(v **types.ClusterContainsContainerInstancesException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterContainsContainerInstancesException + if *v == nil { + sv = &types.ClusterContainsContainerInstancesException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterContainsServicesException(v **types.ClusterContainsServicesException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterContainsServicesException + if *v == nil { + sv = &types.ClusterContainsServicesException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterContainsTasksException(v **types.ClusterContainsTasksException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterContainsTasksException + if *v == nil { + sv = &types.ClusterContainsTasksException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterNotFoundException(v **types.ClusterNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterNotFoundException + if *v == nil { + sv = &types.ClusterNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusters(v *[]types.Cluster, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Cluster + if *v == nil { + cv = []types.Cluster{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Cluster + destAddr := &col + if err := awsAwsjson11_deserializeDocumentCluster(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterServiceConnectDefaults(v **types.ClusterServiceConnectDefaults, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterServiceConnectDefaults + if *v == nil { + sv = &types.ClusterServiceConnectDefaults{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "namespace": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Namespace = ptr.String(jtv) + } + + default: + 
_, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterSetting(v **types.ClusterSetting, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterSetting + if *v == nil { + sv = &types.ClusterSetting{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClusterSettingName to be of type string, got %T instead", value) + } + sv.Name = types.ClusterSettingName(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterSettings(v *[]types.ClusterSetting, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ClusterSetting + if *v == nil { + cv = []types.ClusterSetting{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ClusterSetting + destAddr := &col + if err := awsAwsjson11_deserializeDocumentClusterSetting(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentCompatibilityList(v *[]types.Compatibility, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Compatibility + if *v == nil { + cv = []types.Compatibility{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Compatibility + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Compatibility to be of type string, got %T instead", value) + } + col = types.Compatibility(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentConflictException(v **types.ConflictException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ConflictException + if *v == nil { + sv = &types.ConflictException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "resourceIds": + if err := awsAwsjson11_deserializeDocumentResourceIds(&sv.ResourceIds, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentContainer(v **types.Container, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if 
value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Container + if *v == nil { + sv = &types.Container{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerArn = ptr.String(jtv) + } + + case "cpu": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Cpu = ptr.String(jtv) + } + + case "exitCode": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.ExitCode = ptr.Int32(int32(i64)) + } + + case "gpuIds": + if err := awsAwsjson11_deserializeDocumentGpuIds(&sv.GpuIds, value); err != nil { + return err + } + + case "healthStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected HealthStatus to be of type string, got %T instead", value) + } + sv.HealthStatus = types.HealthStatus(jtv) + } + + case "image": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Image = ptr.String(jtv) + } + + case "imageDigest": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ImageDigest = ptr.String(jtv) + } + + case "lastStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.LastStatus = ptr.String(jtv) + } + + case "managedAgents": + if err := awsAwsjson11_deserializeDocumentManagedAgents(&sv.ManagedAgents, value); err != nil { + return err + } + + case "memory": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Memory = ptr.String(jtv) + } + + case "memoryReservation": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.MemoryReservation = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "networkBindings": + if err := awsAwsjson11_deserializeDocumentNetworkBindings(&sv.NetworkBindings, value); err != nil { + return err + } + + case "networkInterfaces": + if err := awsAwsjson11_deserializeDocumentNetworkInterfaces(&sv.NetworkInterfaces, value); err != nil { + return err + } + + case "reason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Reason = ptr.String(jtv) + } + + case "runtimeId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RuntimeId = ptr.String(jtv) + } + + case "taskArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + 
} + sv.TaskArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentContainerDefinition(v **types.ContainerDefinition, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ContainerDefinition + if *v == nil { + sv = &types.ContainerDefinition{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "command": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.Command, value); err != nil { + return err + } + + case "cpu": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Cpu = int32(i64) + } + + case "credentialSpecs": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.CredentialSpecs, value); err != nil { + return err + } + + case "dependsOn": + if err := awsAwsjson11_deserializeDocumentContainerDependencies(&sv.DependsOn, value); err != nil { + return err + } + + case "disableNetworking": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.DisableNetworking = ptr.Bool(jtv) + } + + case "dnsSearchDomains": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.DnsSearchDomains, value); err != nil { + return err + } + + case "dnsServers": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.DnsServers, value); err != nil { + return err + } + + case "dockerLabels": + if err := awsAwsjson11_deserializeDocumentDockerLabelsMap(&sv.DockerLabels, value); err != nil { + return err + } + + case "dockerSecurityOptions": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.DockerSecurityOptions, value); err != nil { + return err + } + + case "entryPoint": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.EntryPoint, value); err != nil { + return err + } + + case "environment": + if err := awsAwsjson11_deserializeDocumentEnvironmentVariables(&sv.Environment, value); err != nil { + return err + } + + case "environmentFiles": + if err := awsAwsjson11_deserializeDocumentEnvironmentFiles(&sv.EnvironmentFiles, value); err != nil { + return err + } + + case "essential": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.Essential = ptr.Bool(jtv) + } + + case "extraHosts": + if err := awsAwsjson11_deserializeDocumentHostEntryList(&sv.ExtraHosts, value); err != nil { + return err + } + + case "firelensConfiguration": + if err := awsAwsjson11_deserializeDocumentFirelensConfiguration(&sv.FirelensConfiguration, value); err != nil { + return err + } + + case "healthCheck": + if err := awsAwsjson11_deserializeDocumentHealthCheck(&sv.HealthCheck, value); err != nil { + return err + } + + case "hostname": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Hostname = ptr.String(jtv) + } + + case "image": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Image = ptr.String(jtv) + } + + case 
"interactive": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.Interactive = ptr.Bool(jtv) + } + + case "links": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.Links, value); err != nil { + return err + } + + case "linuxParameters": + if err := awsAwsjson11_deserializeDocumentLinuxParameters(&sv.LinuxParameters, value); err != nil { + return err + } + + case "logConfiguration": + if err := awsAwsjson11_deserializeDocumentLogConfiguration(&sv.LogConfiguration, value); err != nil { + return err + } + + case "memory": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Memory = ptr.Int32(int32(i64)) + } + + case "memoryReservation": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.MemoryReservation = ptr.Int32(int32(i64)) + } + + case "mountPoints": + if err := awsAwsjson11_deserializeDocumentMountPointList(&sv.MountPoints, value); err != nil { + return err + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "portMappings": + if err := awsAwsjson11_deserializeDocumentPortMappingList(&sv.PortMappings, value); err != nil { + return err + } + + case "privileged": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.Privileged = ptr.Bool(jtv) + } + + case "pseudoTerminal": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.PseudoTerminal = ptr.Bool(jtv) + } + + case "readonlyRootFilesystem": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.ReadonlyRootFilesystem = ptr.Bool(jtv) + } + + case "repositoryCredentials": + if err := awsAwsjson11_deserializeDocumentRepositoryCredentials(&sv.RepositoryCredentials, value); err != nil { + return err + } + + case "resourceRequirements": + if err := awsAwsjson11_deserializeDocumentResourceRequirements(&sv.ResourceRequirements, value); err != nil { + return err + } + + case "restartPolicy": + if err := awsAwsjson11_deserializeDocumentContainerRestartPolicy(&sv.RestartPolicy, value); err != nil { + return err + } + + case "secrets": + if err := awsAwsjson11_deserializeDocumentSecretList(&sv.Secrets, value); err != nil { + return err + } + + case "startTimeout": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.StartTimeout = ptr.Int32(int32(i64)) + } + + case "stopTimeout": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.StopTimeout = ptr.Int32(int32(i64)) + } + + case "systemControls": + if err := 
awsAwsjson11_deserializeDocumentSystemControls(&sv.SystemControls, value); err != nil { + return err + } + + case "ulimits": + if err := awsAwsjson11_deserializeDocumentUlimitList(&sv.Ulimits, value); err != nil { + return err + } + + case "user": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.User = ptr.String(jtv) + } + + case "volumesFrom": + if err := awsAwsjson11_deserializeDocumentVolumeFromList(&sv.VolumesFrom, value); err != nil { + return err + } + + case "workingDirectory": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.WorkingDirectory = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentContainerDefinitions(v *[]types.ContainerDefinition, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ContainerDefinition + if *v == nil { + cv = []types.ContainerDefinition{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ContainerDefinition + destAddr := &col + if err := awsAwsjson11_deserializeDocumentContainerDefinition(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentContainerDependencies(v *[]types.ContainerDependency, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ContainerDependency + if *v == nil { + cv = []types.ContainerDependency{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ContainerDependency + destAddr := &col + if err := awsAwsjson11_deserializeDocumentContainerDependency(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentContainerDependency(v **types.ContainerDependency, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ContainerDependency + if *v == nil { + sv = &types.ContainerDependency{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "condition": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ContainerCondition to be of type string, got %T instead", value) + } + sv.Condition = types.ContainerCondition(jtv) + } + + case "containerName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentContainerInstance(v **types.ContainerInstance, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil 
+ } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ContainerInstance + if *v == nil { + sv = &types.ContainerInstance{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "agentConnected": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.AgentConnected = jtv + } + + case "agentUpdateStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AgentUpdateStatus to be of type string, got %T instead", value) + } + sv.AgentUpdateStatus = types.AgentUpdateStatus(jtv) + } + + case "attachments": + if err := awsAwsjson11_deserializeDocumentAttachments(&sv.Attachments, value); err != nil { + return err + } + + case "attributes": + if err := awsAwsjson11_deserializeDocumentAttributes(&sv.Attributes, value); err != nil { + return err + } + + case "capacityProviderName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.CapacityProviderName = ptr.String(jtv) + } + + case "containerInstanceArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerInstanceArn = ptr.String(jtv) + } + + case "ec2InstanceId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Ec2InstanceId = ptr.String(jtv) + } + + case "healthStatus": + if err := awsAwsjson11_deserializeDocumentContainerInstanceHealthStatus(&sv.HealthStatus, value); err != nil { + return err + } + + case "pendingTasksCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.PendingTasksCount = int32(i64) + } + + case "registeredAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.RegisteredAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "registeredResources": + if err := awsAwsjson11_deserializeDocumentResources(&sv.RegisteredResources, value); err != nil { + return err + } + + case "remainingResources": + if err := awsAwsjson11_deserializeDocumentResources(&sv.RemainingResources, value); err != nil { + return err + } + + case "runningTasksCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.RunningTasksCount = int32(i64) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + case "statusReason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.StatusReason = ptr.String(jtv) + } + + case "tags": + if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + case "version": + if value != nil 
{ + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Version = i64 + } + + case "versionInfo": + if err := awsAwsjson11_deserializeDocumentVersionInfo(&sv.VersionInfo, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentContainerInstanceHealthStatus(v **types.ContainerInstanceHealthStatus, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ContainerInstanceHealthStatus + if *v == nil { + sv = &types.ContainerInstanceHealthStatus{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "details": + if err := awsAwsjson11_deserializeDocumentInstanceHealthCheckResultList(&sv.Details, value); err != nil { + return err + } + + case "overallStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InstanceHealthCheckState to be of type string, got %T instead", value) + } + sv.OverallStatus = types.InstanceHealthCheckState(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentContainerInstances(v *[]types.ContainerInstance, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ContainerInstance + if *v == nil { + cv = []types.ContainerInstance{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ContainerInstance + destAddr := &col + if err := awsAwsjson11_deserializeDocumentContainerInstance(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentContainerOverride(v **types.ContainerOverride, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ContainerOverride + if *v == nil { + sv = &types.ContainerOverride{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "command": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.Command, value); err != nil { + return err + } + + case "cpu": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Cpu = ptr.Int32(int32(i64)) + } + + case "environment": + if err := awsAwsjson11_deserializeDocumentEnvironmentVariables(&sv.Environment, value); err != nil { + return err + } + + case "environmentFiles": + if err := awsAwsjson11_deserializeDocumentEnvironmentFiles(&sv.EnvironmentFiles, value); err != nil { + return err + } + + case "memory": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return 
err } + sv.Memory = ptr.Int32(int32(i64)) + } + + case "memoryReservation": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.MemoryReservation = ptr.Int32(int32(i64)) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "resourceRequirements": + if err := awsAwsjson11_deserializeDocumentResourceRequirements(&sv.ResourceRequirements, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentContainerOverrides(v *[]types.ContainerOverride, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ContainerOverride + if *v == nil { + cv = []types.ContainerOverride{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ContainerOverride + destAddr := &col + if err := awsAwsjson11_deserializeDocumentContainerOverride(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentContainerRestartPolicy(v **types.ContainerRestartPolicy, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ContainerRestartPolicy + if *v == nil { + sv = &types.ContainerRestartPolicy{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.Enabled = ptr.Bool(jtv) + } + + case "ignoredExitCodes": + if err := awsAwsjson11_deserializeDocumentIntegerList(&sv.IgnoredExitCodes, value); err != nil { + return err + } + + case "restartAttemptPeriod": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.RestartAttemptPeriod = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentContainers(v *[]types.Container, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Container + if *v == nil { + cv = []types.Container{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Container + destAddr := &col + if err := awsAwsjson11_deserializeDocumentContainer(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentDeployment(v **types.Deployment, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) 
+ } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Deployment + if *v == nil { + sv = &types.Deployment{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "capacityProviderStrategy": + if err := awsAwsjson11_deserializeDocumentCapacityProviderStrategy(&sv.CapacityProviderStrategy, value); err != nil { + return err + } + + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "desiredCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.DesiredCount = int32(i64) + } + + case "failedTasks": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.FailedTasks = int32(i64) + } + + case "fargateEphemeralStorage": + if err := awsAwsjson11_deserializeDocumentDeploymentEphemeralStorage(&sv.FargateEphemeralStorage, value); err != nil { + return err + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "launchType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LaunchType to be of type string, got %T instead", value) + } + sv.LaunchType = types.LaunchType(jtv) + } + + case "networkConfiguration": + if err := awsAwsjson11_deserializeDocumentNetworkConfiguration(&sv.NetworkConfiguration, value); err != nil { + return err + } + + case "pendingCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.PendingCount = int32(i64) + } + + case "platformFamily": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.PlatformFamily = ptr.String(jtv) + } + + case "platformVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.PlatformVersion = ptr.String(jtv) + } + + case "rolloutState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DeploymentRolloutState to be of type string, got %T instead", value) + } + sv.RolloutState = types.DeploymentRolloutState(jtv) + } + + case "rolloutStateReason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RolloutStateReason = ptr.String(jtv) + } + + case "runningCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.RunningCount = int32(i64) + } + + case "serviceConnectConfiguration": + if err 
:= awsAwsjson11_deserializeDocumentServiceConnectConfiguration(&sv.ServiceConnectConfiguration, value); err != nil { + return err + } + + case "serviceConnectResources": + if err := awsAwsjson11_deserializeDocumentServiceConnectServiceResourceList(&sv.ServiceConnectResources, value); err != nil { + return err + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + case "taskDefinition": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TaskDefinition = ptr.String(jtv) + } + + case "updatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "volumeConfigurations": + if err := awsAwsjson11_deserializeDocumentServiceVolumeConfigurations(&sv.VolumeConfigurations, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDeploymentAlarms(v **types.DeploymentAlarms, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DeploymentAlarms + if *v == nil { + sv = &types.DeploymentAlarms{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "alarmNames": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.AlarmNames, value); err != nil { + return err + } + + case "enable": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Enable = jtv + } + + case "rollback": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Rollback = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDeploymentCircuitBreaker(v **types.DeploymentCircuitBreaker, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DeploymentCircuitBreaker + if *v == nil { + sv = &types.DeploymentCircuitBreaker{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "enable": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Enable = jtv + } + + case "rollback": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Rollback = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDeploymentConfiguration(v **types.DeploymentConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + 
return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DeploymentConfiguration + if *v == nil { + sv = &types.DeploymentConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "alarms": + if err := awsAwsjson11_deserializeDocumentDeploymentAlarms(&sv.Alarms, value); err != nil { + return err + } + + case "deploymentCircuitBreaker": + if err := awsAwsjson11_deserializeDocumentDeploymentCircuitBreaker(&sv.DeploymentCircuitBreaker, value); err != nil { + return err + } + + case "maximumPercent": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.MaximumPercent = ptr.Int32(int32(i64)) + } + + case "minimumHealthyPercent": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.MinimumHealthyPercent = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDeploymentController(v **types.DeploymentController, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DeploymentController + if *v == nil { + sv = &types.DeploymentController{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DeploymentControllerType to be of type string, got %T instead", value) + } + sv.Type = types.DeploymentControllerType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDeploymentEphemeralStorage(v **types.DeploymentEphemeralStorage, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DeploymentEphemeralStorage + if *v == nil { + sv = &types.DeploymentEphemeralStorage{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "kmsKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.KmsKeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDeployments(v *[]types.Deployment, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Deployment + if *v == nil { + cv = []types.Deployment{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Deployment + destAddr := &col + if err := awsAwsjson11_deserializeDocumentDeployment(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + 
return nil +} + +func awsAwsjson11_deserializeDocumentDevice(v **types.Device, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Device + if *v == nil { + sv = &types.Device{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerPath": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerPath = ptr.String(jtv) + } + + case "hostPath": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.HostPath = ptr.String(jtv) + } + + case "permissions": + if err := awsAwsjson11_deserializeDocumentDeviceCgroupPermissions(&sv.Permissions, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDeviceCgroupPermissions(v *[]types.DeviceCgroupPermission, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DeviceCgroupPermission + if *v == nil { + cv = []types.DeviceCgroupPermission{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DeviceCgroupPermission + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DeviceCgroupPermission to be of type string, got %T instead", value) + } + col = types.DeviceCgroupPermission(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentDevicesList(v *[]types.Device, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Device + if *v == nil { + cv = []types.Device{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Device + destAddr := &col + if err := awsAwsjson11_deserializeDocumentDevice(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentDockerLabelsMap(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson11_deserializeDocumentDockerVolumeConfiguration(v **types.DockerVolumeConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DockerVolumeConfiguration + if *v == nil { + sv = &types.DockerVolumeConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "autoprovision": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.Autoprovision = ptr.Bool(jtv) + } + + case "driver": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Driver = ptr.String(jtv) + } + + case "driverOpts": + if err := awsAwsjson11_deserializeDocumentStringMap(&sv.DriverOpts, value); err != nil { + return err + } + + case "labels": + if err := awsAwsjson11_deserializeDocumentStringMap(&sv.Labels, value); err != nil { + return err + } + + case "scope": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Scope to be of type string, got %T instead", value) + } + sv.Scope = types.Scope(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEBSTagSpecification(v **types.EBSTagSpecification, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EBSTagSpecification + if *v == nil { + sv = &types.EBSTagSpecification{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "propagateTags": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PropagateTags to be of type string, got %T instead", value) + } + sv.PropagateTags = types.PropagateTags(jtv) + } + + case "resourceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EBSResourceType to be of type string, got %T instead", value) + } + sv.ResourceType = types.EBSResourceType(jtv) + } + + case "tags": + if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEBSTagSpecifications(v *[]types.EBSTagSpecification, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.EBSTagSpecification + if *v == nil { + cv = []types.EBSTagSpecification{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.EBSTagSpecification + destAddr := &col + if err := awsAwsjson11_deserializeDocumentEBSTagSpecification(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentEFSAuthorizationConfig(v **types.EFSAuthorizationConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EFSAuthorizationConfig + if *v == nil { + sv = &types.EFSAuthorizationConfig{} + } else { + sv 
= *v + } + + for key, value := range shape { + switch key { + case "accessPointId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AccessPointId = ptr.String(jtv) + } + + case "iam": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EFSAuthorizationConfigIAM to be of type string, got %T instead", value) + } + sv.Iam = types.EFSAuthorizationConfigIAM(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEFSVolumeConfiguration(v **types.EFSVolumeConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EFSVolumeConfiguration + if *v == nil { + sv = &types.EFSVolumeConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authorizationConfig": + if err := awsAwsjson11_deserializeDocumentEFSAuthorizationConfig(&sv.AuthorizationConfig, value); err != nil { + return err + } + + case "fileSystemId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.FileSystemId = ptr.String(jtv) + } + + case "rootDirectory": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RootDirectory = ptr.String(jtv) + } + + case "transitEncryption": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EFSTransitEncryption to be of type string, got %T instead", value) + } + sv.TransitEncryption = types.EFSTransitEncryption(jtv) + } + + case "transitEncryptionPort": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.TransitEncryptionPort = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEnvironmentFile(v **types.EnvironmentFile, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EnvironmentFile + if *v == nil { + sv = &types.EnvironmentFile{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EnvironmentFileType to be of type string, got %T instead", value) + } + sv.Type = types.EnvironmentFileType(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEnvironmentFiles(v *[]types.EnvironmentFile, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + 
return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.EnvironmentFile + if *v == nil { + cv = []types.EnvironmentFile{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.EnvironmentFile + destAddr := &col + if err := awsAwsjson11_deserializeDocumentEnvironmentFile(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentEnvironmentVariables(v *[]types.KeyValuePair, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.KeyValuePair + if *v == nil { + cv = []types.KeyValuePair{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.KeyValuePair + destAddr := &col + if err := awsAwsjson11_deserializeDocumentKeyValuePair(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentEphemeralStorage(v **types.EphemeralStorage, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EphemeralStorage + if *v == nil { + sv = &types.EphemeralStorage{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "sizeInGiB": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.SizeInGiB = int32(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentExecuteCommandConfiguration(v **types.ExecuteCommandConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExecuteCommandConfiguration + if *v == nil { + sv = &types.ExecuteCommandConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "kmsKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.KmsKeyId = ptr.String(jtv) + } + + case "logConfiguration": + if err := awsAwsjson11_deserializeDocumentExecuteCommandLogConfiguration(&sv.LogConfiguration, value); err != nil { + return err + } + + case "logging": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExecuteCommandLogging to be of type string, got %T instead", value) + } + sv.Logging = types.ExecuteCommandLogging(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentExecuteCommandLogConfiguration(v **types.ExecuteCommandLogConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv 
*types.ExecuteCommandLogConfiguration + if *v == nil { + sv = &types.ExecuteCommandLogConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "cloudWatchEncryptionEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.CloudWatchEncryptionEnabled = jtv + } + + case "cloudWatchLogGroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.CloudWatchLogGroupName = ptr.String(jtv) + } + + case "s3BucketName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.S3BucketName = ptr.String(jtv) + } + + case "s3EncryptionEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.S3EncryptionEnabled = jtv + } + + case "s3KeyPrefix": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.S3KeyPrefix = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentFailure(v **types.Failure, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Failure + if *v == nil { + sv = &types.Failure{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "detail": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Detail = ptr.String(jtv) + } + + case "reason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Reason = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentFailures(v *[]types.Failure, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Failure + if *v == nil { + cv = []types.Failure{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Failure + destAddr := &col + if err := awsAwsjson11_deserializeDocumentFailure(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentFirelensConfiguration(v **types.FirelensConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.FirelensConfiguration + if *v == nil { + sv = 
&types.FirelensConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "options": + if err := awsAwsjson11_deserializeDocumentFirelensConfigurationOptionsMap(&sv.Options, value); err != nil { + return err + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FirelensConfigurationType to be of type string, got %T instead", value) + } + sv.Type = types.FirelensConfigurationType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentFirelensConfigurationOptionsMap(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson11_deserializeDocumentFSxWindowsFileServerAuthorizationConfig(v **types.FSxWindowsFileServerAuthorizationConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.FSxWindowsFileServerAuthorizationConfig + if *v == nil { + sv = &types.FSxWindowsFileServerAuthorizationConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "credentialsParameter": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.CredentialsParameter = ptr.String(jtv) + } + + case "domain": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Domain = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentFSxWindowsFileServerVolumeConfiguration(v **types.FSxWindowsFileServerVolumeConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.FSxWindowsFileServerVolumeConfiguration + if *v == nil { + sv = &types.FSxWindowsFileServerVolumeConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authorizationConfig": + if err := awsAwsjson11_deserializeDocumentFSxWindowsFileServerAuthorizationConfig(&sv.AuthorizationConfig, value); err != nil { + return err + } + + case "fileSystemId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.FileSystemId = ptr.String(jtv) + } + + case "rootDirectory": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RootDirectory = ptr.String(jtv) 
+ } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentGpuIds(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentHealthCheck(v **types.HealthCheck, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.HealthCheck + if *v == nil { + sv = &types.HealthCheck{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "command": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.Command, value); err != nil { + return err + } + + case "interval": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Interval = ptr.Int32(int32(i64)) + } + + case "retries": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Retries = ptr.Int32(int32(i64)) + } + + case "startPeriod": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.StartPeriod = ptr.Int32(int32(i64)) + } + + case "timeout": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Timeout = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentHostEntry(v **types.HostEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.HostEntry + if *v == nil { + sv = &types.HostEntry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "hostname": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Hostname = ptr.String(jtv) + } + + case "ipAddress": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.IpAddress = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentHostEntryList(v *[]types.HostEntry, value interface{}) error { + if v == 
nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.HostEntry + if *v == nil { + cv = []types.HostEntry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.HostEntry + destAddr := &col + if err := awsAwsjson11_deserializeDocumentHostEntry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentHostVolumeProperties(v **types.HostVolumeProperties, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.HostVolumeProperties + if *v == nil { + sv = &types.HostVolumeProperties{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "sourcePath": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SourcePath = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInferenceAccelerator(v **types.InferenceAccelerator, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InferenceAccelerator + if *v == nil { + sv = &types.InferenceAccelerator{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "deviceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DeviceName = ptr.String(jtv) + } + + case "deviceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DeviceType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInferenceAcceleratorOverride(v **types.InferenceAcceleratorOverride, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InferenceAcceleratorOverride + if *v == nil { + sv = &types.InferenceAcceleratorOverride{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "deviceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DeviceName = ptr.String(jtv) + } + + case "deviceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DeviceType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInferenceAcceleratorOverrides(v *[]types.InferenceAcceleratorOverride, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.InferenceAcceleratorOverride + if *v == nil { + cv = []types.InferenceAcceleratorOverride{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.InferenceAcceleratorOverride + destAddr := &col + if err := awsAwsjson11_deserializeDocumentInferenceAcceleratorOverride(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentInferenceAccelerators(v *[]types.InferenceAccelerator, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.InferenceAccelerator + if *v == nil { + cv = []types.InferenceAccelerator{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.InferenceAccelerator + destAddr := &col + if err := awsAwsjson11_deserializeDocumentInferenceAccelerator(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentInstanceHealthCheckResult(v **types.InstanceHealthCheckResult, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InstanceHealthCheckResult + if *v == nil { + sv = &types.InstanceHealthCheckResult{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "lastStatusChange": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.LastStatusChange = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "lastUpdated": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.LastUpdated = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InstanceHealthCheckState to be of type string, got %T instead", value) + } + sv.Status = types.InstanceHealthCheckState(jtv) + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InstanceHealthCheckType to be of type string, got %T instead", value) + } + sv.Type = types.InstanceHealthCheckType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInstanceHealthCheckResultList(v *[]types.InstanceHealthCheckResult, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.InstanceHealthCheckResult + if *v == nil { + cv = []types.InstanceHealthCheckResult{} + } else { 
+ cv = *v + } + + for _, value := range shape { + var col types.InstanceHealthCheckResult + destAddr := &col + if err := awsAwsjson11_deserializeDocumentInstanceHealthCheckResult(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentIntegerList(v *[]int32, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []int32 + if *v == nil { + cv = []int32{} + } else { + cv = *v + } + + for _, value := range shape { + var col int32 + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + col = int32(i64) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidParameterException(v **types.InvalidParameterException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidParameterException + if *v == nil { + sv = &types.InvalidParameterException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKernelCapabilities(v **types.KernelCapabilities, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KernelCapabilities + if *v == nil { + sv = &types.KernelCapabilities{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "add": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.Add, value); err != nil { + return err + } + + case "drop": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.Drop, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKeyValuePair(v **types.KeyValuePair, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KeyValuePair + if *v == nil { + sv = &types.KeyValuePair{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + 
} + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LimitExceededException + if *v == nil { + sv = &types.LimitExceededException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLinuxParameters(v **types.LinuxParameters, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LinuxParameters + if *v == nil { + sv = &types.LinuxParameters{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "capabilities": + if err := awsAwsjson11_deserializeDocumentKernelCapabilities(&sv.Capabilities, value); err != nil { + return err + } + + case "devices": + if err := awsAwsjson11_deserializeDocumentDevicesList(&sv.Devices, value); err != nil { + return err + } + + case "initProcessEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.InitProcessEnabled = ptr.Bool(jtv) + } + + case "maxSwap": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.MaxSwap = ptr.Int32(int32(i64)) + } + + case "sharedMemorySize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.SharedMemorySize = ptr.Int32(int32(i64)) + } + + case "swappiness": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Swappiness = ptr.Int32(int32(i64)) + } + + case "tmpfs": + if err := awsAwsjson11_deserializeDocumentTmpfsList(&sv.Tmpfs, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLoadBalancer(v **types.LoadBalancer, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LoadBalancer + if *v == nil { + sv = &types.LoadBalancer{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerName = ptr.String(jtv) + } + + case 
"containerPort": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.ContainerPort = ptr.Int32(int32(i64)) + } + + case "loadBalancerName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.LoadBalancerName = ptr.String(jtv) + } + + case "targetGroupArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TargetGroupArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLoadBalancers(v *[]types.LoadBalancer, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.LoadBalancer + if *v == nil { + cv = []types.LoadBalancer{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.LoadBalancer + destAddr := &col + if err := awsAwsjson11_deserializeDocumentLoadBalancer(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentLogConfiguration(v **types.LogConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LogConfiguration + if *v == nil { + sv = &types.LogConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "logDriver": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LogDriver to be of type string, got %T instead", value) + } + sv.LogDriver = types.LogDriver(jtv) + } + + case "options": + if err := awsAwsjson11_deserializeDocumentLogConfigurationOptionsMap(&sv.Options, value); err != nil { + return err + } + + case "secretOptions": + if err := awsAwsjson11_deserializeDocumentSecretList(&sv.SecretOptions, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLogConfigurationOptionsMap(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson11_deserializeDocumentManagedAgent(v **types.ManagedAgent, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON 
type %v", value) + } + + var sv *types.ManagedAgent + if *v == nil { + sv = &types.ManagedAgent{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "lastStartedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.LastStartedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "lastStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.LastStatus = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ManagedAgentName to be of type string, got %T instead", value) + } + sv.Name = types.ManagedAgentName(jtv) + } + + case "reason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Reason = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentManagedAgents(v *[]types.ManagedAgent, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ManagedAgent + if *v == nil { + cv = []types.ManagedAgent{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ManagedAgent + destAddr := &col + if err := awsAwsjson11_deserializeDocumentManagedAgent(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentManagedScaling(v **types.ManagedScaling, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ManagedScaling + if *v == nil { + sv = &types.ManagedScaling{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "instanceWarmupPeriod": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ManagedScalingInstanceWarmupPeriod to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.InstanceWarmupPeriod = ptr.Int32(int32(i64)) + } + + case "maximumScalingStepSize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ManagedScalingStepSize to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.MaximumScalingStepSize = ptr.Int32(int32(i64)) + } + + case "minimumScalingStepSize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ManagedScalingStepSize to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.MinimumScalingStepSize = ptr.Int32(int32(i64)) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ManagedScalingStatus to be of type string, got %T instead", value) + } + sv.Status = types.ManagedScalingStatus(jtv) + } + + case 
"targetCapacity": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ManagedScalingTargetCapacity to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.TargetCapacity = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentManagedStorageConfiguration(v **types.ManagedStorageConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ManagedStorageConfiguration + if *v == nil { + sv = &types.ManagedStorageConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "fargateEphemeralStorageKmsKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.FargateEphemeralStorageKmsKeyId = ptr.String(jtv) + } + + case "kmsKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.KmsKeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMissingVersionException(v **types.MissingVersionException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MissingVersionException + if *v == nil { + sv = &types.MissingVersionException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMountPoint(v **types.MountPoint, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MountPoint + if *v == nil { + sv = &types.MountPoint{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerPath": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerPath = ptr.String(jtv) + } + + case "readOnly": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.ReadOnly = ptr.Bool(jtv) + } + + case "sourceVolume": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SourceVolume = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMountPointList(v *[]types.MountPoint, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type 
%T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.MountPoint + if *v == nil { + cv = []types.MountPoint{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.MountPoint + destAddr := &col + if err := awsAwsjson11_deserializeDocumentMountPoint(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNamespaceNotFoundException(v **types.NamespaceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NamespaceNotFoundException + if *v == nil { + sv = &types.NamespaceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentNetworkBinding(v **types.NetworkBinding, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NetworkBinding + if *v == nil { + sv = &types.NetworkBinding{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "bindIP": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.BindIP = ptr.String(jtv) + } + + case "containerPort": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.ContainerPort = ptr.Int32(int32(i64)) + } + + case "containerPortRange": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerPortRange = ptr.String(jtv) + } + + case "hostPort": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.HostPort = ptr.Int32(int32(i64)) + } + + case "hostPortRange": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.HostPortRange = ptr.String(jtv) + } + + case "protocol": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TransportProtocol to be of type string, got %T instead", value) + } + sv.Protocol = types.TransportProtocol(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentNetworkBindings(v *[]types.NetworkBinding, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.NetworkBinding + if *v == nil { + cv = []types.NetworkBinding{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.NetworkBinding + destAddr := &col + if err := awsAwsjson11_deserializeDocumentNetworkBinding(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNetworkConfiguration(v **types.NetworkConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NetworkConfiguration + if *v == nil { + sv = &types.NetworkConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "awsvpcConfiguration": + if err := awsAwsjson11_deserializeDocumentAwsVpcConfiguration(&sv.AwsvpcConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentNetworkInterface(v **types.NetworkInterface, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NetworkInterface + if *v == nil { + sv = &types.NetworkInterface{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "attachmentId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AttachmentId = ptr.String(jtv) + } + + case "ipv6Address": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Ipv6Address = ptr.String(jtv) + } + + case "privateIpv4Address": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.PrivateIpv4Address = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentNetworkInterfaces(v *[]types.NetworkInterface, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.NetworkInterface + if *v == nil { + cv = []types.NetworkInterface{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.NetworkInterface + destAddr := &col + if err := awsAwsjson11_deserializeDocumentNetworkInterface(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNoUpdateAvailableException(v **types.NoUpdateAvailableException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NoUpdateAvailableException + if *v == nil { + sv = 
&types.NoUpdateAvailableException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPlacementConstraint(v **types.PlacementConstraint, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PlacementConstraint + if *v == nil { + sv = &types.PlacementConstraint{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "expression": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Expression = ptr.String(jtv) + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlacementConstraintType to be of type string, got %T instead", value) + } + sv.Type = types.PlacementConstraintType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPlacementConstraints(v *[]types.PlacementConstraint, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.PlacementConstraint + if *v == nil { + cv = []types.PlacementConstraint{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.PlacementConstraint + destAddr := &col + if err := awsAwsjson11_deserializeDocumentPlacementConstraint(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentPlacementStrategies(v *[]types.PlacementStrategy, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.PlacementStrategy + if *v == nil { + cv = []types.PlacementStrategy{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.PlacementStrategy + destAddr := &col + if err := awsAwsjson11_deserializeDocumentPlacementStrategy(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentPlacementStrategy(v **types.PlacementStrategy, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PlacementStrategy + if *v == nil { + sv = &types.PlacementStrategy{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "field": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Field = ptr.String(jtv) + } + + case 
"type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlacementStrategyType to be of type string, got %T instead", value) + } + sv.Type = types.PlacementStrategyType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPlatformTaskDefinitionIncompatibilityException(v **types.PlatformTaskDefinitionIncompatibilityException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PlatformTaskDefinitionIncompatibilityException + if *v == nil { + sv = &types.PlatformTaskDefinitionIncompatibilityException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPlatformUnknownException(v **types.PlatformUnknownException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PlatformUnknownException + if *v == nil { + sv = &types.PlatformUnknownException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPortMapping(v **types.PortMapping, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PortMapping + if *v == nil { + sv = &types.PortMapping{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "appProtocol": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ApplicationProtocol to be of type string, got %T instead", value) + } + sv.AppProtocol = types.ApplicationProtocol(jtv) + } + + case "containerPort": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.ContainerPort = ptr.Int32(int32(i64)) + } + + case "containerPortRange": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerPortRange = ptr.String(jtv) + } + + case "hostPort": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.HostPort = ptr.Int32(int32(i64)) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "protocol": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TransportProtocol to be of type string, got %T instead", value) + } + sv.Protocol = types.TransportProtocol(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPortMappingList(v *[]types.PortMapping, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.PortMapping + if *v == nil { + cv = []types.PortMapping{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.PortMapping + destAddr := &col + if err := awsAwsjson11_deserializeDocumentPortMapping(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentProtectedTask(v **types.ProtectedTask, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ProtectedTask + if *v == nil { + sv = &types.ProtectedTask{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "expirationDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.ExpirationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "protectionEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.ProtectionEnabled = jtv + } + + case "taskArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TaskArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentProtectedTasks(v *[]types.ProtectedTask, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ProtectedTask + if *v == nil { + cv = []types.ProtectedTask{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ProtectedTask + destAddr := &col + if err := awsAwsjson11_deserializeDocumentProtectedTask(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentProxyConfiguration(v **types.ProxyConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ProxyConfiguration + if *v == nil { + sv = &types.ProxyConfiguration{} + } else { + sv = *v + } + + for key, value 
:= range shape { + switch key { + case "containerName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerName = ptr.String(jtv) + } + + case "properties": + if err := awsAwsjson11_deserializeDocumentProxyConfigurationProperties(&sv.Properties, value); err != nil { + return err + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ProxyConfigurationType to be of type string, got %T instead", value) + } + sv.Type = types.ProxyConfigurationType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentProxyConfigurationProperties(v *[]types.KeyValuePair, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.KeyValuePair + if *v == nil { + cv = []types.KeyValuePair{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.KeyValuePair + destAddr := &col + if err := awsAwsjson11_deserializeDocumentKeyValuePair(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentRepositoryCredentials(v **types.RepositoryCredentials, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RepositoryCredentials + if *v == nil { + sv = &types.RepositoryCredentials{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "credentialsParameter": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.CredentialsParameter = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentRequiresAttributes(v *[]types.Attribute, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Attribute + if *v == nil { + cv = []types.Attribute{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Attribute + destAddr := &col + if err := awsAwsjson11_deserializeDocumentAttribute(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentResource(v **types.Resource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Resource + if *v == nil { + sv = &types.Resource{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "doubleValue": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.DoubleValue = f64 + + case string: + var f64 
float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.DoubleValue = f64 + + default: + return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) + + } + } + + case "integerValue": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.IntegerValue = int32(i64) + } + + case "longValue": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.LongValue = i64 + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "stringSetValue": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.StringSetValue, value); err != nil { + return err + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Type = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentResourceIds(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentResourceInUseException(v **types.ResourceInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceInUseException + if *v == nil { + sv = &types.ResourceInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + 
jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentResourceRequirement(v **types.ResourceRequirement, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceRequirement + if *v == nil { + sv = &types.ResourceRequirement{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceType to be of type string, got %T instead", value) + } + sv.Type = types.ResourceType(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentResourceRequirements(v *[]types.ResourceRequirement, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ResourceRequirement + if *v == nil { + cv = []types.ResourceRequirement{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ResourceRequirement + destAddr := &col + if err := awsAwsjson11_deserializeDocumentResourceRequirement(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentResources(v *[]types.Resource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Resource + if *v == nil { + cv = []types.Resource{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Resource + destAddr := &col + if err := awsAwsjson11_deserializeDocumentResource(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentRuntimePlatform(v **types.RuntimePlatform, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RuntimePlatform + if *v == nil { + sv = &types.RuntimePlatform{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "cpuArchitecture": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CPUArchitecture to be of type string, got %T instead", value) + } + sv.CpuArchitecture = types.CPUArchitecture(jtv) + } + + case "operatingSystemFamily": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected OSFamily to be of type string, got %T instead", value) + } + sv.OperatingSystemFamily = 
types.OSFamily(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentScale(v **types.Scale, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Scale + if *v == nil { + sv = &types.Scale{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "unit": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ScaleUnit to be of type string, got %T instead", value) + } + sv.Unit = types.ScaleUnit(jtv) + } + + case "value": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.Value = f64 + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.Value = f64 + + default: + return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSecret(v **types.Secret, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Secret + if *v == nil { + sv = &types.Secret{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "valueFrom": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ValueFrom = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSecretList(v *[]types.Secret, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Secret + if *v == nil { + cv = []types.Secret{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Secret + destAddr := &col + if err := awsAwsjson11_deserializeDocumentSecret(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentServerException(v **types.ServerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServerException + if *v == nil { + sv = &types.ServerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentService(v **types.Service, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Service + if *v == nil { + sv = &types.Service{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "capacityProviderStrategy": + if err := awsAwsjson11_deserializeDocumentCapacityProviderStrategy(&sv.CapacityProviderStrategy, value); err != nil { + return err + } + + case "clusterArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ClusterArn = ptr.String(jtv) + } + + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "createdBy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.CreatedBy = ptr.String(jtv) + } + + case "deploymentConfiguration": + if err := awsAwsjson11_deserializeDocumentDeploymentConfiguration(&sv.DeploymentConfiguration, value); err != nil { + return err + } + + case "deploymentController": + if err := awsAwsjson11_deserializeDocumentDeploymentController(&sv.DeploymentController, value); err != nil { + return err + } + + case "deployments": + if err := awsAwsjson11_deserializeDocumentDeployments(&sv.Deployments, value); err != nil { + return err + } + + case "desiredCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.DesiredCount = int32(i64) + } + + case "enableECSManagedTags": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.EnableECSManagedTags = jtv + } + + case "enableExecuteCommand": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.EnableExecuteCommand = jtv + } + + case "events": + if err := awsAwsjson11_deserializeDocumentServiceEvents(&sv.Events, value); err != nil { + return err + } + + case "healthCheckGracePeriodSeconds": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.HealthCheckGracePeriodSeconds = ptr.Int32(int32(i64)) + } + + case "launchType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LaunchType to be of type string, got %T instead", value) + } + sv.LaunchType = types.LaunchType(jtv) + } + + case "loadBalancers": + if err := awsAwsjson11_deserializeDocumentLoadBalancers(&sv.LoadBalancers, value); err != nil { + return err + } + + case 
"networkConfiguration": + if err := awsAwsjson11_deserializeDocumentNetworkConfiguration(&sv.NetworkConfiguration, value); err != nil { + return err + } + + case "pendingCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.PendingCount = int32(i64) + } + + case "placementConstraints": + if err := awsAwsjson11_deserializeDocumentPlacementConstraints(&sv.PlacementConstraints, value); err != nil { + return err + } + + case "placementStrategy": + if err := awsAwsjson11_deserializeDocumentPlacementStrategies(&sv.PlacementStrategy, value); err != nil { + return err + } + + case "platformFamily": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.PlatformFamily = ptr.String(jtv) + } + + case "platformVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.PlatformVersion = ptr.String(jtv) + } + + case "propagateTags": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PropagateTags to be of type string, got %T instead", value) + } + sv.PropagateTags = types.PropagateTags(jtv) + } + + case "roleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RoleArn = ptr.String(jtv) + } + + case "runningCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.RunningCount = int32(i64) + } + + case "schedulingStrategy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SchedulingStrategy to be of type string, got %T instead", value) + } + sv.SchedulingStrategy = types.SchedulingStrategy(jtv) + } + + case "serviceArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ServiceArn = ptr.String(jtv) + } + + case "serviceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ServiceName = ptr.String(jtv) + } + + case "serviceRegistries": + if err := awsAwsjson11_deserializeDocumentServiceRegistries(&sv.ServiceRegistries, value); err != nil { + return err + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + case "tags": + if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + case "taskDefinition": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TaskDefinition = ptr.String(jtv) + } + + case "taskSets": + if err := awsAwsjson11_deserializeDocumentTaskSets(&sv.TaskSets, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceConnectClientAlias(v **types.ServiceConnectClientAlias, value interface{}) error { + if v == nil { + 
return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceConnectClientAlias + if *v == nil { + sv = &types.ServiceConnectClientAlias{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "dnsName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DnsName = ptr.String(jtv) + } + + case "port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PortNumber to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Port = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceConnectClientAliasList(v *[]types.ServiceConnectClientAlias, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ServiceConnectClientAlias + if *v == nil { + cv = []types.ServiceConnectClientAlias{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ServiceConnectClientAlias + destAddr := &col + if err := awsAwsjson11_deserializeDocumentServiceConnectClientAlias(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceConnectConfiguration(v **types.ServiceConnectConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceConnectConfiguration + if *v == nil { + sv = &types.ServiceConnectConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Enabled = jtv + } + + case "logConfiguration": + if err := awsAwsjson11_deserializeDocumentLogConfiguration(&sv.LogConfiguration, value); err != nil { + return err + } + + case "namespace": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Namespace = ptr.String(jtv) + } + + case "services": + if err := awsAwsjson11_deserializeDocumentServiceConnectServiceList(&sv.Services, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceConnectService(v **types.ServiceConnectService, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceConnectService + if *v == nil { + sv = &types.ServiceConnectService{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "clientAliases": + if err := 
awsAwsjson11_deserializeDocumentServiceConnectClientAliasList(&sv.ClientAliases, value); err != nil { + return err + } + + case "discoveryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DiscoveryName = ptr.String(jtv) + } + + case "ingressPortOverride": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PortNumber to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.IngressPortOverride = ptr.Int32(int32(i64)) + } + + case "portName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.PortName = ptr.String(jtv) + } + + case "timeout": + if err := awsAwsjson11_deserializeDocumentTimeoutConfiguration(&sv.Timeout, value); err != nil { + return err + } + + case "tls": + if err := awsAwsjson11_deserializeDocumentServiceConnectTlsConfiguration(&sv.Tls, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceConnectServiceList(v *[]types.ServiceConnectService, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ServiceConnectService + if *v == nil { + cv = []types.ServiceConnectService{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ServiceConnectService + destAddr := &col + if err := awsAwsjson11_deserializeDocumentServiceConnectService(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceConnectServiceResource(v **types.ServiceConnectServiceResource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceConnectServiceResource + if *v == nil { + sv = &types.ServiceConnectServiceResource{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "discoveryArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DiscoveryArn = ptr.String(jtv) + } + + case "discoveryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DiscoveryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceConnectServiceResourceList(v *[]types.ServiceConnectServiceResource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ServiceConnectServiceResource + if *v == nil { + cv = []types.ServiceConnectServiceResource{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ServiceConnectServiceResource + destAddr := 
&col + if err := awsAwsjson11_deserializeDocumentServiceConnectServiceResource(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceConnectTlsCertificateAuthority(v **types.ServiceConnectTlsCertificateAuthority, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceConnectTlsCertificateAuthority + if *v == nil { + sv = &types.ServiceConnectTlsCertificateAuthority{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "awsPcaAuthorityArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AwsPcaAuthorityArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceConnectTlsConfiguration(v **types.ServiceConnectTlsConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceConnectTlsConfiguration + if *v == nil { + sv = &types.ServiceConnectTlsConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "issuerCertificateAuthority": + if err := awsAwsjson11_deserializeDocumentServiceConnectTlsCertificateAuthority(&sv.IssuerCertificateAuthority, value); err != nil { + return err + } + + case "kmsKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.KmsKey = ptr.String(jtv) + } + + case "roleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RoleArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceEvent(v **types.ServiceEvent, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceEvent + if *v == nil { + sv = &types.ServiceEvent{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + 
+func awsAwsjson11_deserializeDocumentServiceEvents(v *[]types.ServiceEvent, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ServiceEvent + if *v == nil { + cv = []types.ServiceEvent{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ServiceEvent + destAddr := &col + if err := awsAwsjson11_deserializeDocumentServiceEvent(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceManagedEBSVolumeConfiguration(v **types.ServiceManagedEBSVolumeConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceManagedEBSVolumeConfiguration + if *v == nil { + sv = &types.ServiceManagedEBSVolumeConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "encrypted": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.Encrypted = ptr.Bool(jtv) + } + + case "filesystemType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TaskFilesystemType to be of type string, got %T instead", value) + } + sv.FilesystemType = types.TaskFilesystemType(jtv) + } + + case "iops": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Iops = ptr.Int32(int32(i64)) + } + + case "kmsKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EBSKMSKeyId to be of type string, got %T instead", value) + } + sv.KmsKeyId = ptr.String(jtv) + } + + case "roleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IAMRoleArn to be of type string, got %T instead", value) + } + sv.RoleArn = ptr.String(jtv) + } + + case "sizeInGiB": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.SizeInGiB = ptr.Int32(int32(i64)) + } + + case "snapshotId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EBSSnapshotId to be of type string, got %T instead", value) + } + sv.SnapshotId = ptr.String(jtv) + } + + case "tagSpecifications": + if err := awsAwsjson11_deserializeDocumentEBSTagSpecifications(&sv.TagSpecifications, value); err != nil { + return err + } + + case "throughput": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Throughput = ptr.Int32(int32(i64)) + } + + case "volumeType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EBSVolumeType to be of type string, got %T instead", value) + } + sv.VolumeType = ptr.String(jtv) + } + + default: + _, _ = key, 
value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceNotActiveException(v **types.ServiceNotActiveException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceNotActiveException + if *v == nil { + sv = &types.ServiceNotActiveException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceNotFoundException(v **types.ServiceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceNotFoundException + if *v == nil { + sv = &types.ServiceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceRegistries(v *[]types.ServiceRegistry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ServiceRegistry + if *v == nil { + cv = []types.ServiceRegistry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ServiceRegistry + destAddr := &col + if err := awsAwsjson11_deserializeDocumentServiceRegistry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceRegistry(v **types.ServiceRegistry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceRegistry + if *v == nil { + sv = &types.ServiceRegistry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerName = ptr.String(jtv) + } + + case "containerPort": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.ContainerPort = ptr.Int32(int32(i64)) + } + + case "port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BoxedInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil 
{ return err } + sv.Port = ptr.Int32(int32(i64)) + } + + case "registryArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RegistryArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServices(v *[]types.Service, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Service + if *v == nil { + cv = []types.Service{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Service + destAddr := &col + if err := awsAwsjson11_deserializeDocumentService(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceVolumeConfiguration(v **types.ServiceVolumeConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceVolumeConfiguration + if *v == nil { + sv = &types.ServiceVolumeConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "managedEBSVolume": + if err := awsAwsjson11_deserializeDocumentServiceManagedEBSVolumeConfiguration(&sv.ManagedEBSVolume, value); err != nil { + return err + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ECSVolumeName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceVolumeConfigurations(v *[]types.ServiceVolumeConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ServiceVolumeConfiguration + if *v == nil { + cv = []types.ServiceVolumeConfiguration{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ServiceVolumeConfiguration + destAddr := &col + if err := awsAwsjson11_deserializeDocumentServiceVolumeConfiguration(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentSession(v **types.Session, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Session + if *v == nil { + sv = &types.Session{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "sessionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SessionId = ptr.String(jtv) + } + + case "streamUrl": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T 
instead", value) + } + sv.StreamUrl = ptr.String(jtv) + } + + case "tokenValue": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SensitiveString to be of type string, got %T instead", value) + } + sv.TokenValue = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSetting(v **types.Setting, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Setting + if *v == nil { + sv = &types.Setting{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SettingName to be of type string, got %T instead", value) + } + sv.Name = types.SettingName(jtv) + } + + case "principalArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.PrincipalArn = ptr.String(jtv) + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SettingType to be of type string, got %T instead", value) + } + sv.Type = types.SettingType(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSettings(v *[]types.Setting, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Setting + if *v == nil { + cv = []types.Setting{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Setting + destAddr := &col + if err := awsAwsjson11_deserializeDocumentSetting(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentStatistics(v *[]types.KeyValuePair, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.KeyValuePair + if *v == nil { + cv = []types.KeyValuePair{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.KeyValuePair + destAddr := &col + if err := awsAwsjson11_deserializeDocumentKeyValuePair(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentStringList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected String to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentStringMap(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson11_deserializeDocumentSystemControl(v **types.SystemControl, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SystemControl + if *v == nil { + sv = &types.SystemControl{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "namespace": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Namespace = ptr.String(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSystemControls(v *[]types.SystemControl, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.SystemControl + if *v == nil { + cv = []types.SystemControl{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.SystemControl + destAddr := &col + if err := awsAwsjson11_deserializeDocumentSystemControl(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTag(v **types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Tag + if *v == nil { + sv = &types.Tag{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "key": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagKey to be of type string, got %T instead", value) + } + sv.Key = ptr.String(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTags(v *[]types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of 
type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Tag + if *v == nil { + cv = []types.Tag{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Tag + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTag(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTargetNotConnectedException(v **types.TargetNotConnectedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TargetNotConnectedException + if *v == nil { + sv = &types.TargetNotConnectedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTargetNotFoundException(v **types.TargetNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TargetNotFoundException + if *v == nil { + sv = &types.TargetNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTask(v **types.Task, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Task + if *v == nil { + sv = &types.Task{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "attachments": + if err := awsAwsjson11_deserializeDocumentAttachments(&sv.Attachments, value); err != nil { + return err + } + + case "attributes": + if err := awsAwsjson11_deserializeDocumentAttributes(&sv.Attributes, value); err != nil { + return err + } + + case "availabilityZone": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AvailabilityZone = ptr.String(jtv) + } + + case "capacityProviderName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.CapacityProviderName = ptr.String(jtv) + } + + case "clusterArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ClusterArn = ptr.String(jtv) + } + + case "connectivity": + if value != nil { + jtv, ok := value.(string) + if !ok { + 
return fmt.Errorf("expected Connectivity to be of type string, got %T instead", value) + } + sv.Connectivity = types.Connectivity(jtv) + } + + case "connectivityAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.ConnectivityAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "containerInstanceArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerInstanceArn = ptr.String(jtv) + } + + case "containers": + if err := awsAwsjson11_deserializeDocumentContainers(&sv.Containers, value); err != nil { + return err + } + + case "cpu": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Cpu = ptr.String(jtv) + } + + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "desiredStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DesiredStatus = ptr.String(jtv) + } + + case "enableExecuteCommand": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.EnableExecuteCommand = jtv + } + + case "ephemeralStorage": + if err := awsAwsjson11_deserializeDocumentEphemeralStorage(&sv.EphemeralStorage, value); err != nil { + return err + } + + case "executionStoppedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.ExecutionStoppedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "fargateEphemeralStorage": + if err := awsAwsjson11_deserializeDocumentTaskEphemeralStorage(&sv.FargateEphemeralStorage, value); err != nil { + return err + } + + case "group": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Group = ptr.String(jtv) + } + + case "healthStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected HealthStatus to be of type string, got %T instead", value) + } + sv.HealthStatus = types.HealthStatus(jtv) + } + + case "inferenceAccelerators": + if err := awsAwsjson11_deserializeDocumentInferenceAccelerators(&sv.InferenceAccelerators, value); err != nil { + return err + } + + case "lastStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.LastStatus = ptr.String(jtv) + } + + case "launchType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LaunchType to be of type string, got %T instead", value) + } + sv.LaunchType = types.LaunchType(jtv) + } + + case "memory": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to 
be of type string, got %T instead", value) + } + sv.Memory = ptr.String(jtv) + } + + case "overrides": + if err := awsAwsjson11_deserializeDocumentTaskOverride(&sv.Overrides, value); err != nil { + return err + } + + case "platformFamily": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.PlatformFamily = ptr.String(jtv) + } + + case "platformVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.PlatformVersion = ptr.String(jtv) + } + + case "pullStartedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.PullStartedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "pullStoppedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.PullStoppedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "startedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.StartedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "startedBy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.StartedBy = ptr.String(jtv) + } + + case "stopCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TaskStopCode to be of type string, got %T instead", value) + } + sv.StopCode = types.TaskStopCode(jtv) + } + + case "stoppedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.StoppedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "stoppedReason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.StoppedReason = ptr.String(jtv) + } + + case "stoppingAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.StoppingAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "tags": + if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + case "taskArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TaskArn = ptr.String(jtv) + } + + case "taskDefinitionArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TaskDefinitionArn = ptr.String(jtv) + } + + case "version": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected 
Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Version = i64 + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTaskDefinition(v **types.TaskDefinition, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TaskDefinition + if *v == nil { + sv = &types.TaskDefinition{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "compatibilities": + if err := awsAwsjson11_deserializeDocumentCompatibilityList(&sv.Compatibilities, value); err != nil { + return err + } + + case "containerDefinitions": + if err := awsAwsjson11_deserializeDocumentContainerDefinitions(&sv.ContainerDefinitions, value); err != nil { + return err + } + + case "cpu": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Cpu = ptr.String(jtv) + } + + case "deregisteredAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.DeregisteredAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "ephemeralStorage": + if err := awsAwsjson11_deserializeDocumentEphemeralStorage(&sv.EphemeralStorage, value); err != nil { + return err + } + + case "executionRoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ExecutionRoleArn = ptr.String(jtv) + } + + case "family": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Family = ptr.String(jtv) + } + + case "inferenceAccelerators": + if err := awsAwsjson11_deserializeDocumentInferenceAccelerators(&sv.InferenceAccelerators, value); err != nil { + return err + } + + case "ipcMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IpcMode to be of type string, got %T instead", value) + } + sv.IpcMode = types.IpcMode(jtv) + } + + case "memory": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Memory = ptr.String(jtv) + } + + case "networkMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NetworkMode to be of type string, got %T instead", value) + } + sv.NetworkMode = types.NetworkMode(jtv) + } + + case "pidMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PidMode to be of type string, got %T instead", value) + } + sv.PidMode = types.PidMode(jtv) + } + + case "placementConstraints": + if err := awsAwsjson11_deserializeDocumentTaskDefinitionPlacementConstraints(&sv.PlacementConstraints, value); err != nil { + return err + } + + case "proxyConfiguration": + if err := awsAwsjson11_deserializeDocumentProxyConfiguration(&sv.ProxyConfiguration, value); err != nil { + return err + } + + case "registeredAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err 
!= nil { return err } + sv.RegisteredAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "registeredBy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RegisteredBy = ptr.String(jtv) + } + + case "requiresAttributes": + if err := awsAwsjson11_deserializeDocumentRequiresAttributes(&sv.RequiresAttributes, value); err != nil { + return err + } + + case "requiresCompatibilities": + if err := awsAwsjson11_deserializeDocumentCompatibilityList(&sv.RequiresCompatibilities, value); err != nil { + return err + } + + case "revision": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.Revision = int32(i64) + } + + case "runtimePlatform": + if err := awsAwsjson11_deserializeDocumentRuntimePlatform(&sv.RuntimePlatform, value); err != nil { + return err + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TaskDefinitionStatus to be of type string, got %T instead", value) + } + sv.Status = types.TaskDefinitionStatus(jtv) + } + + case "taskDefinitionArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TaskDefinitionArn = ptr.String(jtv) + } + + case "taskRoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TaskRoleArn = ptr.String(jtv) + } + + case "volumes": + if err := awsAwsjson11_deserializeDocumentVolumeList(&sv.Volumes, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTaskDefinitionList(v *[]types.TaskDefinition, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.TaskDefinition + if *v == nil { + cv = []types.TaskDefinition{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.TaskDefinition + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTaskDefinition(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTaskDefinitionPlacementConstraint(v **types.TaskDefinitionPlacementConstraint, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TaskDefinitionPlacementConstraint + if *v == nil { + sv = &types.TaskDefinitionPlacementConstraint{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "expression": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Expression = ptr.String(jtv) + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected TaskDefinitionPlacementConstraintType to be of type string, got %T instead", value) + } + sv.Type = types.TaskDefinitionPlacementConstraintType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTaskDefinitionPlacementConstraints(v *[]types.TaskDefinitionPlacementConstraint, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.TaskDefinitionPlacementConstraint + if *v == nil { + cv = []types.TaskDefinitionPlacementConstraint{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.TaskDefinitionPlacementConstraint + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTaskDefinitionPlacementConstraint(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTaskEphemeralStorage(v **types.TaskEphemeralStorage, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TaskEphemeralStorage + if *v == nil { + sv = &types.TaskEphemeralStorage{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "kmsKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.KmsKeyId = ptr.String(jtv) + } + + case "sizeInGiB": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.SizeInGiB = int32(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTaskOverride(v **types.TaskOverride, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TaskOverride + if *v == nil { + sv = &types.TaskOverride{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerOverrides": + if err := awsAwsjson11_deserializeDocumentContainerOverrides(&sv.ContainerOverrides, value); err != nil { + return err + } + + case "cpu": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Cpu = ptr.String(jtv) + } + + case "ephemeralStorage": + if err := awsAwsjson11_deserializeDocumentEphemeralStorage(&sv.EphemeralStorage, value); err != nil { + return err + } + + case "executionRoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ExecutionRoleArn = ptr.String(jtv) + } + + case "inferenceAcceleratorOverrides": + if err := awsAwsjson11_deserializeDocumentInferenceAcceleratorOverrides(&sv.InferenceAcceleratorOverrides, value); err != nil { + return err + } + + case "memory": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Memory = ptr.String(jtv) + } + + case "taskRoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TaskRoleArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTasks(v *[]types.Task, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Task + if *v == nil { + cv = []types.Task{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Task + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTask(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTaskSet(v **types.TaskSet, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TaskSet + if *v == nil { + sv = &types.TaskSet{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "capacityProviderStrategy": + if err := awsAwsjson11_deserializeDocumentCapacityProviderStrategy(&sv.CapacityProviderStrategy, value); err != nil { + return err + } + + case "clusterArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ClusterArn = ptr.String(jtv) + } + + case "computedDesiredCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.ComputedDesiredCount = int32(i64) + } + + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "externalId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ExternalId = ptr.String(jtv) + } + + case "fargateEphemeralStorage": + if err := awsAwsjson11_deserializeDocumentDeploymentEphemeralStorage(&sv.FargateEphemeralStorage, value); err != nil { + return err + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "launchType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LaunchType to be of type string, got %T instead", value) + } + sv.LaunchType = types.LaunchType(jtv) + } + + case "loadBalancers": + if err := awsAwsjson11_deserializeDocumentLoadBalancers(&sv.LoadBalancers, value); err != nil { + return err + } + + case "networkConfiguration": + if err := 
awsAwsjson11_deserializeDocumentNetworkConfiguration(&sv.NetworkConfiguration, value); err != nil { + return err + } + + case "pendingCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.PendingCount = int32(i64) + } + + case "platformFamily": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.PlatformFamily = ptr.String(jtv) + } + + case "platformVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.PlatformVersion = ptr.String(jtv) + } + + case "runningCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.RunningCount = int32(i64) + } + + case "scale": + if err := awsAwsjson11_deserializeDocumentScale(&sv.Scale, value); err != nil { + return err + } + + case "serviceArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ServiceArn = ptr.String(jtv) + } + + case "serviceRegistries": + if err := awsAwsjson11_deserializeDocumentServiceRegistries(&sv.ServiceRegistries, value); err != nil { + return err + } + + case "stabilityStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StabilityStatus to be of type string, got %T instead", value) + } + sv.StabilityStatus = types.StabilityStatus(jtv) + } + + case "stabilityStatusAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.StabilityStatusAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "startedBy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.StartedBy = ptr.String(jtv) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + case "tags": + if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + case "taskDefinition": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TaskDefinition = ptr.String(jtv) + } + + case "taskSetArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TaskSetArn = ptr.String(jtv) + } + + case "updatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { return err } + sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTaskSetNotFoundException(v 
**types.TaskSetNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TaskSetNotFoundException + if *v == nil { + sv = &types.TaskSetNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTaskSets(v *[]types.TaskSet, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.TaskSet + if *v == nil { + cv = []types.TaskSet{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.TaskSet + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTaskSet(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTimeoutConfiguration(v **types.TimeoutConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TimeoutConfiguration + if *v == nil { + sv = &types.TimeoutConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "idleTimeoutSeconds": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Duration to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.IdleTimeoutSeconds = ptr.Int32(int32(i64)) + } + + case "perRequestTimeoutSeconds": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Duration to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.PerRequestTimeoutSeconds = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTmpfs(v **types.Tmpfs, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Tmpfs + if *v == nil { + sv = &types.Tmpfs{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerPath": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerPath = ptr.String(jtv) + } + + case "mountOptions": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.MountOptions, value); err != nil { + return err + } + + case "size": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := 
jtv.Int64() + if err != nil { return err } + sv.Size = int32(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTmpfsList(v *[]types.Tmpfs, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Tmpfs + if *v == nil { + cv = []types.Tmpfs{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Tmpfs + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTmpfs(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentUlimit(v **types.Ulimit, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Ulimit + if *v == nil { + sv = &types.Ulimit{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "hardLimit": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.HardLimit = int32(i64) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UlimitName to be of type string, got %T instead", value) + } + sv.Name = types.UlimitName(jtv) + } + + case "softLimit": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { return err } + sv.SoftLimit = int32(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentUlimitList(v *[]types.Ulimit, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Ulimit + if *v == nil { + cv = []types.Ulimit{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Ulimit + destAddr := &col + if err := awsAwsjson11_deserializeDocumentUlimit(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentUnsupportedFeatureException(v **types.UnsupportedFeatureException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnsupportedFeatureException + if *v == nil { + sv = &types.UnsupportedFeatureException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeDocumentUpdateInProgressException(v **types.UpdateInProgressException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UpdateInProgressException + if *v == nil { + sv = &types.UpdateInProgressException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentVersionInfo(v **types.VersionInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.VersionInfo + if *v == nil { + sv = &types.VersionInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "agentHash": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AgentHash = ptr.String(jtv) + } + + case "agentVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AgentVersion = ptr.String(jtv) + } + + case "dockerVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DockerVersion = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentVolume(v **types.Volume, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Volume + if *v == nil { + sv = &types.Volume{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "configuredAtLaunch": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.ConfiguredAtLaunch = ptr.Bool(jtv) + } + + case "dockerVolumeConfiguration": + if err := awsAwsjson11_deserializeDocumentDockerVolumeConfiguration(&sv.DockerVolumeConfiguration, value); err != nil { + return err + } + + case "efsVolumeConfiguration": + if err := awsAwsjson11_deserializeDocumentEFSVolumeConfiguration(&sv.EfsVolumeConfiguration, value); err != nil { + return err + } + + case "fsxWindowsFileServerVolumeConfiguration": + if err := awsAwsjson11_deserializeDocumentFSxWindowsFileServerVolumeConfiguration(&sv.FsxWindowsFileServerVolumeConfiguration, value); err != nil { + return err + } + + case "host": + if err := awsAwsjson11_deserializeDocumentHostVolumeProperties(&sv.Host, value); err != nil { + return err + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + 
_, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentVolumeFrom(v **types.VolumeFrom, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.VolumeFrom + if *v == nil { + sv = &types.VolumeFrom{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "readOnly": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.ReadOnly = ptr.Bool(jtv) + } + + case "sourceContainer": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SourceContainer = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentVolumeFromList(v *[]types.VolumeFrom, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.VolumeFrom + if *v == nil { + cv = []types.VolumeFrom{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.VolumeFrom + destAddr := &col + if err := awsAwsjson11_deserializeDocumentVolumeFrom(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentVolumeList(v *[]types.Volume, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Volume + if *v == nil { + cv = []types.Volume{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Volume + destAddr := &col + if err := awsAwsjson11_deserializeDocumentVolume(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateCapacityProviderOutput(v **CreateCapacityProviderOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateCapacityProviderOutput + if *v == nil { + sv = &CreateCapacityProviderOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "capacityProvider": + if err := awsAwsjson11_deserializeDocumentCapacityProvider(&sv.CapacityProvider, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateClusterOutput(v **CreateClusterOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateClusterOutput + if *v == nil { + sv = &CreateClusterOutput{} + } else { + sv = *v + } + + for 
key, value := range shape { + switch key { + case "cluster": + if err := awsAwsjson11_deserializeDocumentCluster(&sv.Cluster, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateServiceOutput(v **CreateServiceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateServiceOutput + if *v == nil { + sv = &CreateServiceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "service": + if err := awsAwsjson11_deserializeDocumentService(&sv.Service, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateTaskSetOutput(v **CreateTaskSetOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateTaskSetOutput + if *v == nil { + sv = &CreateTaskSetOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "taskSet": + if err := awsAwsjson11_deserializeDocumentTaskSet(&sv.TaskSet, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteAccountSettingOutput(v **DeleteAccountSettingOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteAccountSettingOutput + if *v == nil { + sv = &DeleteAccountSettingOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "setting": + if err := awsAwsjson11_deserializeDocumentSetting(&sv.Setting, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteAttributesOutput(v **DeleteAttributesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteAttributesOutput + if *v == nil { + sv = &DeleteAttributesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "attributes": + if err := awsAwsjson11_deserializeDocumentAttributes(&sv.Attributes, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteCapacityProviderOutput(v **DeleteCapacityProviderOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteCapacityProviderOutput + if *v == nil { + sv = &DeleteCapacityProviderOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + 
case "capacityProvider": + if err := awsAwsjson11_deserializeDocumentCapacityProvider(&sv.CapacityProvider, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteClusterOutput(v **DeleteClusterOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteClusterOutput + if *v == nil { + sv = &DeleteClusterOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "cluster": + if err := awsAwsjson11_deserializeDocumentCluster(&sv.Cluster, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteServiceOutput(v **DeleteServiceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteServiceOutput + if *v == nil { + sv = &DeleteServiceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "service": + if err := awsAwsjson11_deserializeDocumentService(&sv.Service, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteTaskDefinitionsOutput(v **DeleteTaskDefinitionsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteTaskDefinitionsOutput + if *v == nil { + sv = &DeleteTaskDefinitionsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + case "taskDefinitions": + if err := awsAwsjson11_deserializeDocumentTaskDefinitionList(&sv.TaskDefinitions, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteTaskSetOutput(v **DeleteTaskSetOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteTaskSetOutput + if *v == nil { + sv = &DeleteTaskSetOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "taskSet": + if err := awsAwsjson11_deserializeDocumentTaskSet(&sv.TaskSet, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeregisterContainerInstanceOutput(v **DeregisterContainerInstanceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeregisterContainerInstanceOutput 
+ if *v == nil { + sv = &DeregisterContainerInstanceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerInstance": + if err := awsAwsjson11_deserializeDocumentContainerInstance(&sv.ContainerInstance, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeregisterTaskDefinitionOutput(v **DeregisterTaskDefinitionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeregisterTaskDefinitionOutput + if *v == nil { + sv = &DeregisterTaskDefinitionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "taskDefinition": + if err := awsAwsjson11_deserializeDocumentTaskDefinition(&sv.TaskDefinition, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeCapacityProvidersOutput(v **DescribeCapacityProvidersOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeCapacityProvidersOutput + if *v == nil { + sv = &DescribeCapacityProvidersOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "capacityProviders": + if err := awsAwsjson11_deserializeDocumentCapacityProviders(&sv.CapacityProviders, value); err != nil { + return err + } + + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeClustersOutput(v **DescribeClustersOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeClustersOutput + if *v == nil { + sv = &DescribeClustersOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "clusters": + if err := awsAwsjson11_deserializeDocumentClusters(&sv.Clusters, value); err != nil { + return err + } + + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeContainerInstancesOutput(v **DescribeContainerInstancesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeContainerInstancesOutput + if *v == nil { + sv = &DescribeContainerInstancesOutput{} + } else { + sv = *v + } + + for key, value := range 
shape { + switch key { + case "containerInstances": + if err := awsAwsjson11_deserializeDocumentContainerInstances(&sv.ContainerInstances, value); err != nil { + return err + } + + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeServicesOutput(v **DescribeServicesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeServicesOutput + if *v == nil { + sv = &DescribeServicesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + case "services": + if err := awsAwsjson11_deserializeDocumentServices(&sv.Services, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeTaskDefinitionOutput(v **DescribeTaskDefinitionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeTaskDefinitionOutput + if *v == nil { + sv = &DescribeTaskDefinitionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "tags": + if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + case "taskDefinition": + if err := awsAwsjson11_deserializeDocumentTaskDefinition(&sv.TaskDefinition, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeTaskSetsOutput(v **DescribeTaskSetsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeTaskSetsOutput + if *v == nil { + sv = &DescribeTaskSetsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + case "taskSets": + if err := awsAwsjson11_deserializeDocumentTaskSets(&sv.TaskSets, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeTasksOutput(v **DescribeTasksOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeTasksOutput + if *v == nil { + sv = &DescribeTasksOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + case "tasks": + if err := 
awsAwsjson11_deserializeDocumentTasks(&sv.Tasks, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDiscoverPollEndpointOutput(v **DiscoverPollEndpointOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DiscoverPollEndpointOutput + if *v == nil { + sv = &DiscoverPollEndpointOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "endpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Endpoint = ptr.String(jtv) + } + + case "serviceConnectEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ServiceConnectEndpoint = ptr.String(jtv) + } + + case "systemLogsEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SystemLogsEndpoint = ptr.String(jtv) + } + + case "telemetryEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TelemetryEndpoint = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentExecuteCommandOutput(v **ExecuteCommandOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ExecuteCommandOutput + if *v == nil { + sv = &ExecuteCommandOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "clusterArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ClusterArn = ptr.String(jtv) + } + + case "containerArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerArn = ptr.String(jtv) + } + + case "containerName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContainerName = ptr.String(jtv) + } + + case "interactive": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Interactive = jtv + } + + case "session": + if err := awsAwsjson11_deserializeDocumentSession(&sv.Session, value); err != nil { + return err + } + + case "taskArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TaskArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetTaskProtectionOutput(v **GetTaskProtectionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { 
+ return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetTaskProtectionOutput + if *v == nil { + sv = &GetTaskProtectionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + case "protectedTasks": + if err := awsAwsjson11_deserializeDocumentProtectedTasks(&sv.ProtectedTasks, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListAccountSettingsOutput(v **ListAccountSettingsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAccountSettingsOutput + if *v == nil { + sv = &ListAccountSettingsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "settings": + if err := awsAwsjson11_deserializeDocumentSettings(&sv.Settings, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListAttributesOutput(v **ListAttributesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAttributesOutput + if *v == nil { + sv = &ListAttributesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "attributes": + if err := awsAwsjson11_deserializeDocumentAttributes(&sv.Attributes, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListClustersOutput(v **ListClustersOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListClustersOutput + if *v == nil { + sv = &ListClustersOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "clusterArns": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.ClusterArns, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListContainerInstancesOutput(v **ListContainerInstancesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } 
+ if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListContainerInstancesOutput + if *v == nil { + sv = &ListContainerInstancesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerInstanceArns": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.ContainerInstanceArns, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListServicesByNamespaceOutput(v **ListServicesByNamespaceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListServicesByNamespaceOutput + if *v == nil { + sv = &ListServicesByNamespaceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "serviceArns": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.ServiceArns, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListServicesOutput(v **ListServicesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListServicesOutput + if *v == nil { + sv = &ListServicesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "serviceArns": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.ServiceArns, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTagsForResourceOutput + if *v == nil { + sv = &ListTagsForResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "tags": + if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListTaskDefinitionFamiliesOutput(v **ListTaskDefinitionFamiliesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok 
:= value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTaskDefinitionFamiliesOutput + if *v == nil { + sv = &ListTaskDefinitionFamiliesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "families": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.Families, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListTaskDefinitionsOutput(v **ListTaskDefinitionsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTaskDefinitionsOutput + if *v == nil { + sv = &ListTaskDefinitionsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "taskDefinitionArns": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.TaskDefinitionArns, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListTasksOutput(v **ListTasksOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTasksOutput + if *v == nil { + sv = &ListTasksOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "taskArns": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.TaskArns, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutAccountSettingDefaultOutput(v **PutAccountSettingDefaultOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutAccountSettingDefaultOutput + if *v == nil { + sv = &PutAccountSettingDefaultOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "setting": + if err := awsAwsjson11_deserializeDocumentSetting(&sv.Setting, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutAccountSettingOutput(v **PutAccountSettingOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected 
JSON type %v", value) + } + + var sv *PutAccountSettingOutput + if *v == nil { + sv = &PutAccountSettingOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "setting": + if err := awsAwsjson11_deserializeDocumentSetting(&sv.Setting, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutAttributesOutput(v **PutAttributesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutAttributesOutput + if *v == nil { + sv = &PutAttributesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "attributes": + if err := awsAwsjson11_deserializeDocumentAttributes(&sv.Attributes, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutClusterCapacityProvidersOutput(v **PutClusterCapacityProvidersOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutClusterCapacityProvidersOutput + if *v == nil { + sv = &PutClusterCapacityProvidersOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "cluster": + if err := awsAwsjson11_deserializeDocumentCluster(&sv.Cluster, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentRegisterContainerInstanceOutput(v **RegisterContainerInstanceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RegisterContainerInstanceOutput + if *v == nil { + sv = &RegisterContainerInstanceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerInstance": + if err := awsAwsjson11_deserializeDocumentContainerInstance(&sv.ContainerInstance, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentRegisterTaskDefinitionOutput(v **RegisterTaskDefinitionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RegisterTaskDefinitionOutput + if *v == nil { + sv = &RegisterTaskDefinitionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "tags": + if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + case "taskDefinition": + if err := awsAwsjson11_deserializeDocumentTaskDefinition(&sv.TaskDefinition, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentRunTaskOutput(v **RunTaskOutput, value interface{}) error { + if v == nil { + 
return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RunTaskOutput + if *v == nil { + sv = &RunTaskOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + case "tasks": + if err := awsAwsjson11_deserializeDocumentTasks(&sv.Tasks, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentStartTaskOutput(v **StartTaskOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartTaskOutput + if *v == nil { + sv = &StartTaskOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + case "tasks": + if err := awsAwsjson11_deserializeDocumentTasks(&sv.Tasks, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentStopTaskOutput(v **StopTaskOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StopTaskOutput + if *v == nil { + sv = &StopTaskOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "task": + if err := awsAwsjson11_deserializeDocumentTask(&sv.Task, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentSubmitAttachmentStateChangesOutput(v **SubmitAttachmentStateChangesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *SubmitAttachmentStateChangesOutput + if *v == nil { + sv = &SubmitAttachmentStateChangesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "acknowledgment": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Acknowledgment = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentSubmitContainerStateChangeOutput(v **SubmitContainerStateChangeOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *SubmitContainerStateChangeOutput + if *v == nil { + sv = &SubmitContainerStateChangeOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "acknowledgment": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Acknowledgment = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentSubmitTaskStateChangeOutput(v **SubmitTaskStateChangeOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *SubmitTaskStateChangeOutput + if *v == nil { + sv = &SubmitTaskStateChangeOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "acknowledgment": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Acknowledgment = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentTagResourceOutput(v **TagResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *TagResourceOutput + if *v == nil { + sv = &TagResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUntagResourceOutput(v **UntagResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UntagResourceOutput + if *v == nil { + sv = &UntagResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateCapacityProviderOutput(v **UpdateCapacityProviderOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateCapacityProviderOutput + if *v == nil { + sv = &UpdateCapacityProviderOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "capacityProvider": + if err := awsAwsjson11_deserializeDocumentCapacityProvider(&sv.CapacityProvider, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateClusterOutput(v **UpdateClusterOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateClusterOutput + if *v == nil { + sv = &UpdateClusterOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "cluster": + if err := awsAwsjson11_deserializeDocumentCluster(&sv.Cluster, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + 
} + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateClusterSettingsOutput(v **UpdateClusterSettingsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateClusterSettingsOutput + if *v == nil { + sv = &UpdateClusterSettingsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "cluster": + if err := awsAwsjson11_deserializeDocumentCluster(&sv.Cluster, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateContainerAgentOutput(v **UpdateContainerAgentOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateContainerAgentOutput + if *v == nil { + sv = &UpdateContainerAgentOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerInstance": + if err := awsAwsjson11_deserializeDocumentContainerInstance(&sv.ContainerInstance, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateContainerInstancesStateOutput(v **UpdateContainerInstancesStateOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateContainerInstancesStateOutput + if *v == nil { + sv = &UpdateContainerInstancesStateOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "containerInstances": + if err := awsAwsjson11_deserializeDocumentContainerInstances(&sv.ContainerInstances, value); err != nil { + return err + } + + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateServiceOutput(v **UpdateServiceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateServiceOutput + if *v == nil { + sv = &UpdateServiceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "service": + if err := awsAwsjson11_deserializeDocumentService(&sv.Service, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateServicePrimaryTaskSetOutput(v **UpdateServicePrimaryTaskSetOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateServicePrimaryTaskSetOutput + if *v == nil { + sv = 
&UpdateServicePrimaryTaskSetOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "taskSet": + if err := awsAwsjson11_deserializeDocumentTaskSet(&sv.TaskSet, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateTaskProtectionOutput(v **UpdateTaskProtectionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateTaskProtectionOutput + if *v == nil { + sv = &UpdateTaskProtectionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentFailures(&sv.Failures, value); err != nil { + return err + } + + case "protectedTasks": + if err := awsAwsjson11_deserializeDocumentProtectedTasks(&sv.ProtectedTasks, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateTaskSetOutput(v **UpdateTaskSetOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateTaskSetOutput + if *v == nil { + sv = &UpdateTaskSetOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "taskSet": + if err := awsAwsjson11_deserializeDocumentTaskSet(&sv.TaskSet, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type protocolErrorInfo struct { + Type string `json:"__type"` + Message string + Code any // nonstandard for awsjson but some services do present the type here +} + +func getProtocolErrorInfo(decoder *json.Decoder) (protocolErrorInfo, error) { + var errInfo protocolErrorInfo + if err := decoder.Decode(&errInfo); err != nil { + if err == io.EOF { + return errInfo, nil + } + return errInfo, err + } + + return errInfo, nil +} + +func resolveProtocolErrorType(headerType string, bodyInfo protocolErrorInfo) (string, bool) { + if len(headerType) != 0 { + return headerType, true + } else if len(bodyInfo.Type) != 0 { + return bodyInfo.Type, true + } else if code, ok := bodyInfo.Code.(string); ok && len(code) != 0 { + return code, true + } + return "", false +} diff --git a/aws-sdk-go-v2/service/ecs/doc.go b/aws-sdk-go-v2/service/ecs/doc.go new file mode 100644 index 00000000000..99d6e251c4d --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/doc.go @@ -0,0 +1,26 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package ecs provides the API client, operations, and parameter types for Amazon +// EC2 Container Service. +// +// Amazon Elastic Container Service +// +// Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, +// container management service. It makes it easy to run, stop, and manage Docker +// containers. You can host your cluster on a serverless infrastructure that's +// managed by Amazon ECS by launching your services or tasks on Fargate. For more +// control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud +// (Amazon EC2) or External (on-premises) instances that you manage. 
+// +// Amazon ECS makes it easy to launch and stop container-based applications with +// simple API calls. This makes it easy to get the state of your cluster from a +// centralized service, and gives you access to many familiar Amazon EC2 features. +// +// You can use Amazon ECS to schedule the placement of containers across your +// cluster based on your resource needs, isolation policies, and availability +// requirements. With Amazon ECS, you don't need to operate your own cluster +// management and configuration management systems. You also don't need to worry +// about scaling your management infrastructure. +package ecs + + diff --git a/aws-sdk-go-v2/service/ecs/endpoints.go b/aws-sdk-go-v2/service/ecs/endpoints.go new file mode 100644 index 00000000000..9072160a243 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/endpoints.go @@ -0,0 +1,532 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + "context" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" + "errors" + "fmt" + "net/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/ecs/internal/endpoints" + "github.com/aws/smithy-go/middleware" + "os" + "github.com/aws/smithy-go/ptr" + smithyauth "github.com/aws/smithy-go/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + smithyhttp "github.com/aws/smithy-go/transport/http" + "strings" + "github.com/aws/smithy-go/tracing" + "net/url" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. 
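+//
+// A minimal usage sketch (not generated code; the URL below is a placeholder
+// and an already-loaded aws.Config named cfg is assumed):
+//
+//	client := NewFromConfig(cfg, func(o *Options) {
+//		o.EndpointResolver = EndpointResolverFromURL("https://ecs.internal.example.com")
+//	})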
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{ URL : url, Source : aws.EndpointSourceCustom } + for _, fn := range optFns { fn(&e) } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { e.SigningRegion = region } + return e, nil }, + ) +} +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "ecs" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if (strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix)) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } + } + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_ECS") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "ECS", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } + } + +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string +} + +// ValidateRequired validates required parameters are set. 
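+// Note that only UseDualStack and UseFIPS are checked here; Region and
+// Endpoint are optional at this level, and a missing Region is reported by
+// the resolver rules themselves ("Invalid Configuration: Missing Region").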
+func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + return nil +} +// WithDefaults returns a shallow copy of EndpointParameterswith default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) +} + +if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) +} + return p +} + +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. +func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack +_UseFIPS := *params.UseFIPS + + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") +} +if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") +} +uriString := _Endpoint + +uri, err := url.Parse(uriString) +if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) +} + +return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + +}, nil +} +if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder +out.WriteString("https://ecs-fips.") +out.WriteString(_Region) +out.WriteString(".") +out.WriteString(_PartitionResult.DualStackDnsSuffix) +return out.String() +}() + +uri, err := url.Parse(uriString) +if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) +} + +return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + +}, nil +} +} +return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") +} +} +if _UseFIPS == true { + if _PartitionResult.SupportsFIPS == true { + uriString := func() string { + var out strings.Builder +out.WriteString("https://ecs-fips.") 
+out.WriteString(_Region) +out.WriteString(".") +out.WriteString(_PartitionResult.DnsSuffix) +return out.String() +}() + +uri, err := url.Parse(uriString) +if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) +} + +return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + +}, nil +} +return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") +} +if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder +out.WriteString("https://ecs.") +out.WriteString(_Region) +out.WriteString(".") +out.WriteString(_PartitionResult.DualStackDnsSuffix) +return out.String() +}() + +uri, err := url.Parse(uriString) +if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) +} + +return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + +}, nil +} +return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") +} +uriString := func() string { + var out strings.Builder +out.WriteString("https://ecs.") +out.WriteString(_Region) +out.WriteString(".") +out.WriteString(_PartitionResult.DnsSuffix) +return out.String() +}() + +uri, err := url.Parse(uriString) +if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) +} + +return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + +}, nil +} +return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") +} +return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = bindRegion(options.Region) +params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) +params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) +params.Endpoint = options.BaseEndpoint + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + 
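+	// The resolved URI is recorded on the tracing span, merged into the
+	// request URL (joining any existing path), and any endpoint-level auth
+	// options are copied into the resolved auth scheme's signer properties.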
span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/aws-sdk-go-v2/service/ecs/endpoints_config_test.go b/aws-sdk-go-v2/service/ecs/endpoints_config_test.go new file mode 100644 index 00000000000..d5bc93be031 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/endpoints_config_test.go @@ -0,0 +1,140 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "context" + "os" + "reflect" + "testing" +) + +type mockConfigSource struct { + global string + service string + ignore bool + } + + // GetIgnoreConfiguredEndpoints is used in knowing when to disable configured + // endpoints feature. + func (m mockConfigSource) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { + return m.ignore, m.ignore, nil + } + + // GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use + // with configured endpoints. + func (m mockConfigSource) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { + if m.service != "" { + return m.service, true, nil + } + return "", false, nil + } + +func TestResolveBaseEndpoint(t *testing.T) { + cases := map[string]struct { + envGlobal string + envService string + envIgnore bool + configGlobal string + configService string + configIgnore bool + clientEndpoint *string + expectURL *string + }{ + "env ignore": { + envGlobal: "https://env-global.dev", + envService: "https://env-ecs.dev", + envIgnore: true, + configGlobal: "http://config-global.dev", + configService: "http://config-ecs.dev", + expectURL: nil, + }, + "env global": { + envGlobal: "https://env-global.dev", + configGlobal: "http://config-global.dev", + configService: "http://config-ecs.dev", + expectURL: aws.String("https://env-global.dev"), + }, + "env service": { + envGlobal: "https://env-global.dev", + envService: "https://env-ecs.dev", + configGlobal: "http://config-global.dev", + configService: "http://config-ecs.dev", + expectURL: aws.String("https://env-ecs.dev"), + }, + "config ignore": { + envGlobal: "https://env-global.dev", + envService: "https://env-ecs.dev", + configGlobal: "http://config-global.dev", + configService: "http://config-ecs.dev", + configIgnore: true, + expectURL: nil, + }, + "config global": { + configGlobal: "http://config-global.dev", + expectURL: aws.String("http://config-global.dev"), + }, + "config service": { + configGlobal: "http://config-global.dev", + configService: "http://config-ecs.dev", + expectURL: aws.String("http://config-ecs.dev"), + }, + "client": { + envGlobal: "https://env-global.dev", + envService: "https://env-ecs.dev", + configGlobal: "http://config-global.dev", + configService: "http://config-ecs.dev", + clientEndpoint: aws.String("https://client-ecs.dev"), + expectURL: aws.String("https://client-ecs.dev"), + }, + } 
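+	// The cases above encode the expected precedence: a client-supplied
+	// BaseEndpoint overrides everything, the service-specific endpoint
+	// (AWS_ENDPOINT_URL_ECS or its shared-config equivalent) takes priority
+	// over the global one, and the ignore flags disable configured endpoints
+	// entirely.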
+ + for name, c := range cases { + t.Run(name, func(t *testing.T) { + os.Clearenv() + + awsConfig := aws.Config{} + ignore := c.envIgnore || c.configIgnore + + if c.configGlobal != "" && !ignore { + awsConfig.BaseEndpoint = aws.String(c.configGlobal) + } + + if c.envGlobal != "" { + t.Setenv("AWS_ENDPOINT_URL", c.envGlobal) + if !ignore { + awsConfig.BaseEndpoint = aws.String(c.envGlobal) + } + } + + if c.envService != "" { + t.Setenv("AWS_ENDPOINT_URL_ECS", c.envService) + } + + awsConfig.ConfigSources = []interface{}{ + mockConfigSource{ + global: c.envGlobal, + service: c.envService, + ignore: c.envIgnore, + }, + mockConfigSource{ + global: c.configGlobal, + service: c.configService, + ignore: c.configIgnore, + }, + } + + client := NewFromConfig(awsConfig, func (o *Options) { + if c.clientEndpoint != nil { + o.BaseEndpoint = c.clientEndpoint + } + }) + + if e, a := c.expectURL, client.options.BaseEndpoint; !reflect.DeepEqual(e, a) { + t.Errorf("expect endpoint %v , got %v", e, a) + } + }) + } +} diff --git a/aws-sdk-go-v2/service/ecs/endpoints_test.go b/aws-sdk-go-v2/service/ecs/endpoints_test.go new file mode 100644 index 00000000000..464ef428eff --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/endpoints_test.go @@ -0,0 +1,1859 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + "context" + "net/http" + "github.com/aws/smithy-go/ptr" + "reflect" + smithy "github.com/aws/smithy-go" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "strings" + "testing" + "net/url" +) + +// For region af-south-1 with FIPS disabled and DualStack disabled +func TestEndpointCase0(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("af-south-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.af-south-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region ap-east-1 with FIPS disabled and DualStack disabled +func TestEndpointCase1(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.ap-east-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, 
result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region ap-northeast-1 with FIPS disabled and DualStack disabled +func TestEndpointCase2(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-northeast-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.ap-northeast-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region ap-northeast-2 with FIPS disabled and DualStack disabled +func TestEndpointCase3(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-northeast-2"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.ap-northeast-2.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region ap-northeast-3 with FIPS disabled and DualStack disabled +func TestEndpointCase4(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-northeast-3"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.ap-northeast-3.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region ap-south-1 with FIPS disabled and DualStack disabled +func TestEndpointCase5(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-south-1"), +UseFIPS: 
ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.ap-south-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region ap-southeast-1 with FIPS disabled and DualStack disabled +func TestEndpointCase6(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-southeast-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.ap-southeast-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region ap-southeast-2 with FIPS disabled and DualStack disabled +func TestEndpointCase7(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-southeast-2"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.ap-southeast-2.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region ap-southeast-3 with FIPS disabled and DualStack disabled +func TestEndpointCase8(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-southeast-3"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.ap-southeast-3.amazonaws.com") + 
+expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region ca-central-1 with FIPS disabled and DualStack disabled +func TestEndpointCase9(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ca-central-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.ca-central-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region eu-central-1 with FIPS disabled and DualStack disabled +func TestEndpointCase10(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("eu-central-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.eu-central-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region eu-north-1 with FIPS disabled and DualStack disabled +func TestEndpointCase11(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("eu-north-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.eu-north-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", 
expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region eu-south-1 with FIPS disabled and DualStack disabled +func TestEndpointCase12(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("eu-south-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.eu-south-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region eu-west-1 with FIPS disabled and DualStack disabled +func TestEndpointCase13(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("eu-west-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.eu-west-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region eu-west-2 with FIPS disabled and DualStack disabled +func TestEndpointCase14(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("eu-west-2"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.eu-west-2.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region eu-west-3 with FIPS disabled and DualStack disabled +func TestEndpointCase15(t *testing.T) { + var params = 
EndpointParameters{ + Region: ptr.String("eu-west-3"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.eu-west-3.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region me-south-1 with FIPS disabled and DualStack disabled +func TestEndpointCase16(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("me-south-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.me-south-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region sa-east-1 with FIPS disabled and DualStack disabled +func TestEndpointCase17(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("sa-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.sa-east-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-east-1 with FIPS disabled and DualStack disabled +func TestEndpointCase18(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := 
url.Parse("https://ecs.us-east-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-east-1 with FIPS enabled and DualStack disabled +func TestEndpointCase19(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.us-east-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-east-2 with FIPS disabled and DualStack disabled +func TestEndpointCase20(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-2"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.us-east-2.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-east-2 with FIPS enabled and DualStack disabled +func TestEndpointCase21(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-2"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.us-east-2.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect 
headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-west-1 with FIPS disabled and DualStack disabled +func TestEndpointCase22(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.us-west-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-west-1 with FIPS enabled and DualStack disabled +func TestEndpointCase23(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.us-west-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-west-2 with FIPS disabled and DualStack disabled +func TestEndpointCase24(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-2"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.us-west-2.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-west-2 with FIPS enabled and DualStack disabled +func TestEndpointCase25(t *testing.T) { + var 
params = EndpointParameters{ + Region: ptr.String("us-west-2"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.us-west-2.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-east-1 with FIPS enabled and DualStack enabled +func TestEndpointCase26(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(true), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.us-east-1.api.aws") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-east-1 with FIPS disabled and DualStack enabled +func TestEndpointCase27(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(true), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.us-east-1.api.aws") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region cn-north-1 with FIPS disabled and DualStack disabled +func TestEndpointCase28(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("cn-north-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := 
url.Parse("https://ecs.cn-north-1.amazonaws.com.cn") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region cn-northwest-1 with FIPS disabled and DualStack disabled +func TestEndpointCase29(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("cn-northwest-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.cn-northwest-1.amazonaws.com.cn") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region cn-north-1 with FIPS enabled and DualStack enabled +func TestEndpointCase30(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("cn-north-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(true), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.cn-north-1.api.amazonwebservices.com.cn") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region cn-north-1 with FIPS enabled and DualStack disabled +func TestEndpointCase31(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("cn-north-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.cn-north-1.amazonaws.com.cn") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if 
!reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region cn-north-1 with FIPS disabled and DualStack enabled +func TestEndpointCase32(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("cn-north-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(true), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.cn-north-1.api.amazonwebservices.com.cn") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-gov-east-1 with FIPS disabled and DualStack disabled +func TestEndpointCase33(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-gov-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.us-gov-east-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-gov-east-1 with FIPS enabled and DualStack disabled +func TestEndpointCase34(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-gov-east-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.us-gov-east-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) 
+} +} + +// For region us-gov-west-1 with FIPS disabled and DualStack disabled +func TestEndpointCase35(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-gov-west-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.us-gov-west-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-gov-west-1 with FIPS enabled and DualStack disabled +func TestEndpointCase36(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-gov-west-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.us-gov-west-1.amazonaws.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-gov-east-1 with FIPS enabled and DualStack enabled +func TestEndpointCase37(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-gov-east-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(true), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.us-gov-east-1.api.aws") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-gov-east-1 with FIPS disabled and DualStack enabled +func TestEndpointCase38(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-gov-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(true), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := 
resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.us-gov-east-1.api.aws") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-iso-east-1 with FIPS disabled and DualStack disabled +func TestEndpointCase39(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-iso-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.us-iso-east-1.c2s.ic.gov") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-iso-west-1 with FIPS disabled and DualStack disabled +func TestEndpointCase40(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-iso-west-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.us-iso-west-1.c2s.ic.gov") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-iso-east-1 with FIPS enabled and DualStack enabled +func TestEndpointCase41(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-iso-east-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(true), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err == nil { + t.Fatalf("expect error, got none") +} +if e, a := "FIPS and DualStack are enabled, but this partition does not support one or both", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) +} +} + +// For region 
us-iso-east-1 with FIPS enabled and DualStack disabled +func TestEndpointCase42(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-iso-east-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.us-iso-east-1.c2s.ic.gov") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-iso-east-1 with FIPS disabled and DualStack enabled +func TestEndpointCase43(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-iso-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(true), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err == nil { + t.Fatalf("expect error, got none") +} +if e, a := "DualStack is enabled but this partition does not support DualStack", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) +} +} + +// For region us-isob-east-1 with FIPS disabled and DualStack disabled +func TestEndpointCase44(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-isob-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs.us-isob-east-1.sc2s.sgov.gov") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-isob-east-1 with FIPS enabled and DualStack enabled +func TestEndpointCase45(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-isob-east-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(true), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err == nil { + t.Fatalf("expect error, got none") +} +if e, a := "FIPS and DualStack are enabled, but this partition does not support one or both", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) +} +} + +// For region us-isob-east-1 with FIPS enabled and DualStack disabled +func TestEndpointCase46(t *testing.T) { + var params = EndpointParameters{ + Region: 
ptr.String("us-isob-east-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(false), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://ecs-fips.us-isob-east-1.sc2s.sgov.gov") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For region us-isob-east-1 with FIPS disabled and DualStack enabled +func TestEndpointCase47(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-isob-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(true), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err == nil { + t.Fatalf("expect error, got none") +} +if e, a := "DualStack is enabled but this partition does not support DualStack", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) +} +} + +// For custom endpoint with region set and fips disabled and dualstack disabled +func TestEndpointCase48(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +Endpoint: ptr.String("https://example.com"), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://example.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For custom endpoint with region not set and fips disabled and dualstack disabled +func TestEndpointCase49(t *testing.T) { + var params = EndpointParameters{ + UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(false), +Endpoint: ptr.String("https://example.com"), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err != nil { + t.Fatalf("expect no error, got %v", err) +} + +uri, _ := url.Parse("https://example.com") + +expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, +} + +if e, a := expectEndpoint.URI, result.URI; e != a{ + t.Errorf("expect %v URI, got %v", e, a) +} + +if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) +} + +if 
!reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) +} +} + +// For custom endpoint with fips enabled and dualstack disabled +func TestEndpointCase50(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), +UseFIPS: ptr.Bool(true), +UseDualStack: ptr.Bool(false), +Endpoint: ptr.String("https://example.com"), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err == nil { + t.Fatalf("expect error, got none") +} +if e, a := "Invalid Configuration: FIPS and custom endpoint are not supported", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) +} +} + +// For custom endpoint with fips disabled and dualstack enabled +func TestEndpointCase51(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), +UseFIPS: ptr.Bool(false), +UseDualStack: ptr.Bool(true), +Endpoint: ptr.String("https://example.com"), +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err == nil { + t.Fatalf("expect error, got none") +} +if e, a := "Invalid Configuration: Dualstack and custom endpoint are not supported", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) +} +} + +// Missing region +func TestEndpointCase52(t *testing.T) { + var params = EndpointParameters{ + +} + +resolver := NewDefaultEndpointResolverV2() +result, err := resolver.ResolveEndpoint(context.Background(), params) +_, _ = result, err + +if err == nil { + t.Fatalf("expect error, got none") +} +if e, a := "Invalid Configuration: Missing Region", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) +} +} diff --git a/aws-sdk-go-v2/service/ecs/generated.json b/aws-sdk-go-v2/service/ecs/generated.json new file mode 100644 index 00000000000..bb4c88a1b84 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/generated.json @@ -0,0 +1,89 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0", + "github.com/jmespath/go-jmespath": "v0.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_CreateCapacityProvider.go", + "api_op_CreateCluster.go", + "api_op_CreateService.go", + "api_op_CreateTaskSet.go", + "api_op_DeleteAccountSetting.go", + "api_op_DeleteAttributes.go", + "api_op_DeleteCapacityProvider.go", + "api_op_DeleteCluster.go", + "api_op_DeleteService.go", + "api_op_DeleteTaskDefinitions.go", + "api_op_DeleteTaskSet.go", + "api_op_DeregisterContainerInstance.go", + "api_op_DeregisterTaskDefinition.go", + "api_op_DescribeCapacityProviders.go", + "api_op_DescribeClusters.go", + "api_op_DescribeContainerInstances.go", + "api_op_DescribeServices.go", + "api_op_DescribeTaskDefinition.go", + "api_op_DescribeTaskSets.go", + "api_op_DescribeTasks.go", + "api_op_DiscoverPollEndpoint.go", + "api_op_ExecuteCommand.go", + "api_op_GetTaskProtection.go", + "api_op_ListAccountSettings.go", + "api_op_ListAttributes.go", + "api_op_ListClusters.go", + "api_op_ListContainerInstances.go", + "api_op_ListServices.go", + "api_op_ListServicesByNamespace.go", + 
"api_op_ListTagsForResource.go", + "api_op_ListTaskDefinitionFamilies.go", + "api_op_ListTaskDefinitions.go", + "api_op_ListTasks.go", + "api_op_PutAccountSetting.go", + "api_op_PutAccountSettingDefault.go", + "api_op_PutAttributes.go", + "api_op_PutClusterCapacityProviders.go", + "api_op_RegisterContainerInstance.go", + "api_op_RegisterTaskDefinition.go", + "api_op_RunTask.go", + "api_op_StartTask.go", + "api_op_StopTask.go", + "api_op_SubmitAttachmentStateChanges.go", + "api_op_SubmitContainerStateChange.go", + "api_op_SubmitTaskStateChange.go", + "api_op_TagResource.go", + "api_op_UntagResource.go", + "api_op_UpdateCapacityProvider.go", + "api_op_UpdateCluster.go", + "api_op_UpdateClusterSettings.go", + "api_op_UpdateContainerAgent.go", + "api_op_UpdateContainerInstancesState.go", + "api_op_UpdateService.go", + "api_op_UpdateServicePrimaryTaskSet.go", + "api_op_UpdateTaskProtection.go", + "api_op_UpdateTaskSet.go", + "auth.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "options.go", + "protocol_test.go", + "serializers.go", + "snapshot_test.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/ecs", + "unstable": false +} diff --git a/aws-sdk-go-v2/service/ecs/go.mod b/aws-sdk-go-v2/service/ecs/go.mod new file mode 100644 index 00000000000..90d58cadf71 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/go.mod @@ -0,0 +1,11 @@ +module github.com/aws/aws-sdk-go-v2/service/ecs + +go 1.21 + +require ( + github.com/aws/aws-sdk-go-v2 v1.32.2 + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 + github.com/aws/smithy-go v1.22.0 + github.com/jmespath/go-jmespath v0.4.0 +) diff --git a/aws-sdk-go-v2/service/ecs/go.sum b/aws-sdk-go-v2/service/ecs/go.sum new file mode 100644 index 00000000000..14753cfcc5b --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/go.sum @@ -0,0 +1,20 @@ +github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= +github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= +github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= +github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/aws-sdk-go-v2/service/ecs/go_module_metadata.go b/aws-sdk-go-v2/service/ecs/go_module_metadata.go new file mode 100644 index 00000000000..df63a06f026 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package ecs + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.47.4" diff --git a/aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints.go b/aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..0b8a7bd2240 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints.go @@ -0,0 +1,542 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState + +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} +// Resolver ECS endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { return endpoint, &aws.MissingRegionError{} } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} +var partitionRegexp = struct{ + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + endpoints.DefaultKey{ + Variant:endpoints.DualStackVariant, + }:{ + Hostname: "ecs.{region}.api.aws", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:endpoints.FIPSVariant, + }:{ + Hostname: "ecs-fips.{region}.amazonaws.com", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:endpoints.FIPSVariant|endpoints.DualStackVariant, + }:{ + Hostname: "ecs-fips.{region}.api.aws", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:0, + }:{ + Hostname: "ecs.{region}.amazonaws.com", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: 
endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "fips-us-east-1", + }: endpoints.Endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-2", + }: endpoints.Endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-1", + }: endpoints.Endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-2", + }: endpoints.Endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant:endpoints.FIPSVariant, + }: { + Hostname: "ecs-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant:endpoints.FIPSVariant, + }: { + Hostname: "ecs-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant:endpoints.FIPSVariant, + }: { + Hostname: "ecs-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant:endpoints.FIPSVariant, + }: { + Hostname: "ecs-fips.us-west-2.amazonaws.com", + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + endpoints.DefaultKey{ + Variant:endpoints.DualStackVariant, + }:{ + Hostname: "ecs.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https", }, + SignatureVersions: 
[]string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:endpoints.FIPSVariant, + }:{ + Hostname: "ecs-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:endpoints.FIPSVariant|endpoints.DualStackVariant, + }:{ + Hostname: "ecs-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:0, + }:{ + Hostname: "ecs.{region}.amazonaws.com.cn", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{ + }, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + endpoints.DefaultKey{ + Variant:endpoints.FIPSVariant, + }:{ + Hostname: "ecs-fips.{region}.c2s.ic.gov", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:0, + }:{ + Hostname: "ecs.{region}.c2s.ic.gov", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{ + }, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + endpoints.DefaultKey{ + Variant:endpoints.FIPSVariant, + }:{ + Hostname: "ecs-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:0, + }:{ + Hostname: "ecs.{region}.sc2s.sgov.gov", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{ + }, + }, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + endpoints.DefaultKey{ + Variant:endpoints.FIPSVariant, + }:{ + Hostname: "ecs-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:0, + }:{ + Hostname: "ecs.{region}.cloud.adc-e.uk", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + endpoints.DefaultKey{ + Variant:endpoints.FIPSVariant, + }:{ + Hostname: "ecs-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:0, + }:{ + Hostname: "ecs.{region}.csp.hci.ic.gov", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + endpoints.DefaultKey{ + Variant:endpoints.DualStackVariant, + }:{ + Hostname: "ecs.{region}.api.aws", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:endpoints.FIPSVariant, + }:{ + 
Hostname: "ecs-fips.{region}.amazonaws.com", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:endpoints.FIPSVariant|endpoints.DualStackVariant, + }:{ + Hostname: "ecs-fips.{region}.api.aws", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + endpoints.DefaultKey{ + Variant:0, + }:{ + Hostname: "ecs.{region}.amazonaws.com", + Protocols: []string{"https", }, + SignatureVersions: []string{"v4", }, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "fips-us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant:endpoints.FIPSVariant, + }: { + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant:endpoints.FIPSVariant, + }: { + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, +} diff --git a/aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints_test.go b/aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints_test.go new file mode 100644 index 00000000000..5fb0ff196e4 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints_test.go @@ -0,0 +1,12 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package endpoints + +import ( + "testing" +) + +func TestRegexCompile(t *testing.T) { + _ = defaultPartitions +} diff --git a/aws-sdk-go-v2/service/ecs/options.go b/aws-sdk-go-v2/service/ecs/options.go new file mode 100644 index 00000000000..bef2f3cadbf --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/options.go @@ -0,0 +1,237 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "context" + "net/http" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + smithyauth "github.com/aws/smithy-go/auth" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/aws/smithy-go/tracing" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all +// operations invoked for this client. Use functional options on operation call to +// modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. +AppID string + +// This endpoint will be given as input to an EndpointResolverV2. It is used for +// providing a custom base endpoint that is subject to modifications by the +// processing EndpointResolverV2. +BaseEndpoint *string + +// Configures the events that will be sent to the configured logger. 
+ClientLogMode aws.ClientLogMode + +// The credentials object to use when signing requests. +Credentials aws.CredentialsProvider + +// The configuration DefaultsMode that the SDK should use when constructing the +// clients initial default settings. +DefaultsMode aws.DefaultsMode + +// The endpoint options to be used when attempting to resolve an endpoint. +EndpointOptions EndpointResolverOptions + +// The service endpoint resolver. +// +// Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a +// value for this field will likely prevent you from using any endpoint-related +// service features released after the introduction of EndpointResolverV2 and +// BaseEndpoint. +// +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +EndpointResolver EndpointResolver + +// Resolves the endpoint used for a particular service operation. This should be +// used over the deprecated EndpointResolver. +EndpointResolverV2 EndpointResolverV2 + +// Signature Version 4 (SigV4) Signer +HTTPSignerV4 HTTPSignerV4 + +// Provides idempotency tokens values that will be automatically populated into +// idempotent API operations. +IdempotencyTokenProvider IdempotencyTokenProvider + +// The logger writer interface to write logging messages to. +Logger logging.Logger + +// The client meter provider. +MeterProvider metrics.MeterProvider + +// The region to send requests to. (Required) +Region string + +// RetryMaxAttempts specifies the maximum number attempts an API client will call +// an operation that fails with a retryable error. A value of 0 is ignored, and +// will not be used to configure the API client created default retryer, or modify +// per operation call's retry max attempts. +// +// If specified in an operation call's functional options with a value that is +// different than the constructed client's Options, the Client's Retryer will be +// wrapped to use the operation's specific RetryMaxAttempts value. +RetryMaxAttempts int + +// RetryMode specifies the retry mode the API client will be created with, if +// Retryer option is not also specified. +// +// When creating a new API Clients this member will only be used if the Retryer +// Options member is nil. This value will be ignored if Retryer is not nil. +// +// Currently does not support per operation call overrides, may in the future. +RetryMode aws.RetryMode + +// Retryer guides how HTTP requests should be retried in case of recoverable +// failures. When nil the API client will use a default retryer. The kind of +// default retry created by the API client can be changed with the RetryMode +// option. +Retryer aws.Retryer + +// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set +// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You +// should not populate this structure programmatically, or rely on the values here +// within your applications. +RuntimeEnvironment aws.RuntimeEnvironment + +// The client tracer provider. +TracerProvider tracing.TracerProvider + +// The initial DefaultsMode used when the client options were constructed. If the +// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved +// value was at that point in time. +// +// Currently does not support per operation call overrides, may in the future. +resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP +// implementation if nil. 
+HTTPClient HTTPClient + +// The auth scheme resolver which determines how to authenticate for each +// operation. +AuthSchemeResolver AuthSchemeResolver + +// The list of auth schemes supported by the client. +AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) +} + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} +} + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func (o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) +} + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. 
+func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) +} + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { + options.Credentials = nil + } +} diff --git a/aws-sdk-go-v2/service/ecs/protocol_test.go b/aws-sdk-go-v2/service/ecs/protocol_test.go new file mode 100644 index 00000000000..c1501dbf2b0 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/protocol_test.go @@ -0,0 +1,6 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + + diff --git a/aws-sdk-go-v2/service/ecs/serializers.go b/aws-sdk-go-v2/service/ecs/serializers.go new file mode 100644 index 00000000000..20b67f5e1c9 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/serializers.go @@ -0,0 +1,8226 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package ecs + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/smithy-go/encoding/httpbinding" + "math" + "github.com/aws/smithy-go/middleware" + "path" + smithy "github.com/aws/smithy-go" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithyjson "github.com/aws/smithy-go/encoding/json" + smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +type awsAwsjson11_serializeOpCreateCapacityProvider struct { +} + +func (*awsAwsjson11_serializeOpCreateCapacityProvider) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateCapacityProvider) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateCapacityProviderInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.CreateCapacityProvider") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentCreateCapacityProviderInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpCreateCluster struct { +} + +func (*awsAwsjson11_serializeOpCreateCluster) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateClusterInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.CreateCluster") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateClusterInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpCreateService struct { +} + +func (*awsAwsjson11_serializeOpCreateService) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateService) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown 
transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateServiceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.CreateService") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateServiceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpCreateTaskSet struct { +} + +func (*awsAwsjson11_serializeOpCreateTaskSet) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateTaskSet) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateTaskSetInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.CreateTaskSet") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateTaskSetInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = 
request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDeleteAccountSetting struct { +} + +func (*awsAwsjson11_serializeOpDeleteAccountSetting) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteAccountSetting) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteAccountSettingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DeleteAccountSetting") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteAccountSettingInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDeleteAttributes struct { +} + +func (*awsAwsjson11_serializeOpDeleteAttributes) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteAttributesInput) + _ = input + if !ok { + return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DeleteAttributes") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteAttributesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDeleteCapacityProvider struct { +} + +func (*awsAwsjson11_serializeOpDeleteCapacityProvider) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteCapacityProvider) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteCapacityProviderInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DeleteCapacityProvider") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteCapacityProviderInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + 
} + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDeleteCluster struct { +} + +func (*awsAwsjson11_serializeOpDeleteCluster) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteClusterInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DeleteCluster") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteClusterInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDeleteService struct { +} + +func (*awsAwsjson11_serializeOpDeleteService) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteService) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteServiceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + 
request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DeleteService") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteServiceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDeleteTaskDefinitions struct { +} + +func (*awsAwsjson11_serializeOpDeleteTaskDefinitions) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteTaskDefinitions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteTaskDefinitionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DeleteTaskDefinitions") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteTaskDefinitionsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + 
+ endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDeleteTaskSet struct { +} + +func (*awsAwsjson11_serializeOpDeleteTaskSet) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteTaskSet) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteTaskSetInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DeleteTaskSet") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteTaskSetInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDeregisterContainerInstance struct { +} + +func (*awsAwsjson11_serializeOpDeregisterContainerInstance) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeregisterContainerInstance) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeregisterContainerInstanceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if 
request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DeregisterContainerInstance") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeregisterContainerInstanceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDeregisterTaskDefinition struct { +} + +func (*awsAwsjson11_serializeOpDeregisterTaskDefinition) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeregisterTaskDefinition) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeregisterTaskDefinitionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DeregisterTaskDefinition") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeregisterTaskDefinitionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type 
awsAwsjson11_serializeOpDescribeCapacityProviders struct { +} + +func (*awsAwsjson11_serializeOpDescribeCapacityProviders) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeCapacityProviders) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeCapacityProvidersInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DescribeCapacityProviders") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeCapacityProvidersInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDescribeClusters struct { +} + +func (*awsAwsjson11_serializeOpDescribeClusters) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeClusters) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeClustersInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DescribeClusters") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeClustersInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDescribeContainerInstances struct { +} + +func (*awsAwsjson11_serializeOpDescribeContainerInstances) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeContainerInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeContainerInstancesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DescribeContainerInstances") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeContainerInstancesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDescribeServices struct { +} + +func 
(*awsAwsjson11_serializeOpDescribeServices) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeServices) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeServicesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DescribeServices") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeServicesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDescribeTaskDefinition struct { +} + +func (*awsAwsjson11_serializeOpDescribeTaskDefinition) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeTaskDefinition) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeTaskDefinitionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + 
request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DescribeTaskDefinition") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeTaskDefinitionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDescribeTasks struct { +} + +func (*awsAwsjson11_serializeOpDescribeTasks) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeTasksInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DescribeTasks") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeTasksInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDescribeTaskSets struct { +} + +func (*awsAwsjson11_serializeOpDescribeTaskSets) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeTaskSets) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeTaskSetsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DescribeTaskSets") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeTaskSetsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpDiscoverPollEndpoint struct { +} + +func (*awsAwsjson11_serializeOpDiscoverPollEndpoint) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDiscoverPollEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DiscoverPollEndpointInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, 
metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.DiscoverPollEndpoint") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDiscoverPollEndpointInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpExecuteCommand struct { +} + +func (*awsAwsjson11_serializeOpExecuteCommand) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpExecuteCommand) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ExecuteCommandInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.ExecuteCommand") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentExecuteCommandInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpGetTaskProtection struct { +} + +func (*awsAwsjson11_serializeOpGetTaskProtection) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetTaskProtection) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, 
+) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetTaskProtectionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.GetTaskProtection") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetTaskProtectionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpListAccountSettings struct { +} + +func (*awsAwsjson11_serializeOpListAccountSettings) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListAccountSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAccountSettingsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.ListAccountSettings") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListAccountSettingsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpListAttributes struct { +} + +func (*awsAwsjson11_serializeOpListAttributes) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAttributesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.ListAttributes") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListAttributesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpListClusters struct { +} + +func (*awsAwsjson11_serializeOpListClusters) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListClusters) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer 
endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListClustersInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.ListClusters") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListClustersInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpListContainerInstances struct { +} + +func (*awsAwsjson11_serializeOpListContainerInstances) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListContainerInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListContainerInstancesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.ListContainerInstances") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentListContainerInstancesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpListServices struct { +} + +func (*awsAwsjson11_serializeOpListServices) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListServices) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListServicesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.ListServices") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListServicesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpListServicesByNamespace struct { +} + +func (*awsAwsjson11_serializeOpListServicesByNamespace) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListServicesByNamespace) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListServicesByNamespaceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.ListServicesByNamespace") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListServicesByNamespaceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpListTagsForResource struct { +} + +func (*awsAwsjson11_serializeOpListTagsForResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTagsForResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.ListTagsForResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListTagsForResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: 
err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpListTaskDefinitionFamilies struct { +} + +func (*awsAwsjson11_serializeOpListTaskDefinitionFamilies) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListTaskDefinitionFamilies) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTaskDefinitionFamiliesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.ListTaskDefinitionFamilies") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListTaskDefinitionFamiliesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpListTaskDefinitions struct { +} + +func (*awsAwsjson11_serializeOpListTaskDefinitions) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListTaskDefinitions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := 
in.Parameters.(*ListTaskDefinitionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.ListTaskDefinitions") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListTaskDefinitionsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpListTasks struct { +} + +func (*awsAwsjson11_serializeOpListTasks) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTasksInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.ListTasks") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListTasksInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpPutAccountSetting struct { +} + +func (*awsAwsjson11_serializeOpPutAccountSetting) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutAccountSetting) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutAccountSettingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.PutAccountSetting") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutAccountSettingInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpPutAccountSettingDefault struct { +} + +func (*awsAwsjson11_serializeOpPutAccountSettingDefault) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutAccountSettingDefault) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutAccountSettingDefaultInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", 
in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.PutAccountSettingDefault") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutAccountSettingDefaultInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpPutAttributes struct { +} + +func (*awsAwsjson11_serializeOpPutAttributes) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutAttributesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.PutAttributes") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutAttributesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpPutClusterCapacityProviders struct { +} + +func (*awsAwsjson11_serializeOpPutClusterCapacityProviders) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutClusterCapacityProviders) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutClusterCapacityProvidersInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.PutClusterCapacityProviders") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutClusterCapacityProvidersInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpRegisterContainerInstance struct { +} + +func (*awsAwsjson11_serializeOpRegisterContainerInstance) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpRegisterContainerInstance) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RegisterContainerInstanceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + 
request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.RegisterContainerInstance") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentRegisterContainerInstanceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpRegisterTaskDefinition struct { +} + +func (*awsAwsjson11_serializeOpRegisterTaskDefinition) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpRegisterTaskDefinition) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RegisterTaskDefinitionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.RegisterTaskDefinition") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentRegisterTaskDefinitionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} 
+ } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpRunTask struct { +} + +func (*awsAwsjson11_serializeOpRunTask) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpRunTask) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RunTaskInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.RunTask") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentRunTaskInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpStartTask struct { +} + +func (*awsAwsjson11_serializeOpStartTask) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpStartTask) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartTaskInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + 
request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.StartTask") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentStartTaskInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpStopTask struct { +} + +func (*awsAwsjson11_serializeOpStopTask) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpStopTask) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StopTaskInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.StopTask") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentStopTaskInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpSubmitAttachmentStateChanges struct { +} + +func (*awsAwsjson11_serializeOpSubmitAttachmentStateChanges) ID() string { + return "OperationSerializer" +} + +func (m 
*awsAwsjson11_serializeOpSubmitAttachmentStateChanges) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SubmitAttachmentStateChangesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.SubmitAttachmentStateChanges") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentSubmitAttachmentStateChangesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpSubmitContainerStateChange struct { +} + +func (*awsAwsjson11_serializeOpSubmitContainerStateChange) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpSubmitContainerStateChange) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SubmitContainerStateChangeInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err 
:= httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.SubmitContainerStateChange") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentSubmitContainerStateChangeInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpSubmitTaskStateChange struct { +} + +func (*awsAwsjson11_serializeOpSubmitTaskStateChange) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpSubmitTaskStateChange) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SubmitTaskStateChangeInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.SubmitTaskStateChange") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentSubmitTaskStateChangeInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpTagResource struct { +} + +func (*awsAwsjson11_serializeOpTagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx 
context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*TagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.TagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpUntagResource struct { +} + +func (*awsAwsjson11_serializeOpUntagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UntagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.UntagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpUpdateCapacityProvider struct { +} + +func (*awsAwsjson11_serializeOpUpdateCapacityProvider) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateCapacityProvider) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateCapacityProviderInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.UpdateCapacityProvider") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateCapacityProviderInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpUpdateCluster struct { +} + +func (*awsAwsjson11_serializeOpUpdateCluster) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := 
tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateClusterInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.UpdateCluster") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateClusterInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpUpdateClusterSettings struct { +} + +func (*awsAwsjson11_serializeOpUpdateClusterSettings) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateClusterSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateClusterSettingsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.UpdateClusterSettings") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateClusterSettingsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpUpdateContainerAgent struct { +} + +func (*awsAwsjson11_serializeOpUpdateContainerAgent) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateContainerAgent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateContainerAgentInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.UpdateContainerAgent") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateContainerAgentInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpUpdateContainerInstancesState struct { +} + +func (*awsAwsjson11_serializeOpUpdateContainerInstancesState) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateContainerInstancesState) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, 
"OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateContainerInstancesStateInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.UpdateContainerInstancesState") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateContainerInstancesStateInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpUpdateService struct { +} + +func (*awsAwsjson11_serializeOpUpdateService) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateService) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateServiceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.UpdateService") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateServiceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpUpdateServicePrimaryTaskSet struct { +} + +func (*awsAwsjson11_serializeOpUpdateServicePrimaryTaskSet) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateServicePrimaryTaskSet) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateServicePrimaryTaskSetInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.UpdateServicePrimaryTaskSet") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateServicePrimaryTaskSetInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpUpdateTaskProtection struct { +} + +func (*awsAwsjson11_serializeOpUpdateTaskProtection) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateTaskProtection) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") 
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateTaskProtectionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.UpdateTaskProtection") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateTaskProtectionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +type awsAwsjson11_serializeOpUpdateTaskSet struct { +} + +func (*awsAwsjson11_serializeOpUpdateTaskSet) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateTaskSet) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateTaskSetInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerServiceV20141113.UpdateTaskSet") + + jsonEncoder := 
smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateTaskSetInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsAwsjson11_serializeDocumentAttachmentStateChange(v *types.AttachmentStateChange, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttachmentArn != nil { + ok := object.Key("attachmentArn") + ok.String(*v.AttachmentArn) + } + + if v.Status != nil { + ok := object.Key("status") + ok.String(*v.Status) + } + + return nil +} + +func awsAwsjson11_serializeDocumentAttachmentStateChanges(v []types.AttachmentStateChange, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentAttachmentStateChange(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentAttribute(v *types.Attribute, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + if v.TargetId != nil { + ok := object.Key("targetId") + ok.String(*v.TargetId) + } + + if len(v.TargetType) > 0 { + ok := object.Key("targetType") + ok.String(string(v.TargetType)) + } + + if v.Value != nil { + ok := object.Key("value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeDocumentAttributes(v []types.Attribute, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentAttribute(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentAutoScalingGroupProvider(v *types.AutoScalingGroupProvider, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AutoScalingGroupArn != nil { + ok := object.Key("autoScalingGroupArn") + ok.String(*v.AutoScalingGroupArn) + } + + if len(v.ManagedDraining) > 0 { + ok := object.Key("managedDraining") + ok.String(string(v.ManagedDraining)) + } + + if v.ManagedScaling != nil { + ok := object.Key("managedScaling") + if err := awsAwsjson11_serializeDocumentManagedScaling(v.ManagedScaling, ok); err != nil { + return err + } + } + + if len(v.ManagedTerminationProtection) > 0 { + ok := object.Key("managedTerminationProtection") + ok.String(string(v.ManagedTerminationProtection)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentAutoScalingGroupProviderUpdate(v *types.AutoScalingGroupProviderUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ManagedDraining) > 0 { + ok := object.Key("managedDraining") + ok.String(string(v.ManagedDraining)) + } + + if v.ManagedScaling != nil { + ok := object.Key("managedScaling") + if err := awsAwsjson11_serializeDocumentManagedScaling(v.ManagedScaling, ok); err != nil { + return err + } + } + + if len(v.ManagedTerminationProtection) > 0 { + ok := object.Key("managedTerminationProtection") + ok.String(string(v.ManagedTerminationProtection)) + } + 
+ return nil +} + +func awsAwsjson11_serializeDocumentAwsVpcConfiguration(v *types.AwsVpcConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.AssignPublicIp) > 0 { + ok := object.Key("assignPublicIp") + ok.String(string(v.AssignPublicIp)) + } + + if v.SecurityGroups != nil { + ok := object.Key("securityGroups") + if err := awsAwsjson11_serializeDocumentStringList(v.SecurityGroups, ok); err != nil { + return err + } + } + + if v.Subnets != nil { + ok := object.Key("subnets") + if err := awsAwsjson11_serializeDocumentStringList(v.Subnets, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentCapacityProviderFieldList(v []types.CapacityProviderField, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentCapacityProviderStrategy(v []types.CapacityProviderStrategyItem, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentCapacityProviderStrategyItem(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentCapacityProviderStrategyItem(v *types.CapacityProviderStrategyItem, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Base != 0 { + ok := object.Key("base") + ok.Integer(v.Base) + } + + if v.CapacityProvider != nil { + ok := object.Key("capacityProvider") + ok.String(*v.CapacityProvider) + } + + if v.Weight != 0 { + ok := object.Key("weight") + ok.Integer(v.Weight) + } + + return nil +} + +func awsAwsjson11_serializeDocumentClusterConfiguration(v *types.ClusterConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ExecuteCommandConfiguration != nil { + ok := object.Key("executeCommandConfiguration") + if err := awsAwsjson11_serializeDocumentExecuteCommandConfiguration(v.ExecuteCommandConfiguration, ok); err != nil { + return err + } + } + + if v.ManagedStorageConfiguration != nil { + ok := object.Key("managedStorageConfiguration") + if err := awsAwsjson11_serializeDocumentManagedStorageConfiguration(v.ManagedStorageConfiguration, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentClusterFieldList(v []types.ClusterField, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentClusterServiceConnectDefaultsRequest(v *types.ClusterServiceConnectDefaultsRequest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Namespace != nil { + ok := object.Key("namespace") + ok.String(*v.Namespace) + } + + return nil +} + +func awsAwsjson11_serializeDocumentClusterSetting(v *types.ClusterSetting, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Name) > 0 { + ok := object.Key("name") + ok.String(string(v.Name)) + } + + if v.Value != nil { + ok := object.Key("value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeDocumentClusterSettings(v []types.ClusterSetting, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := 
awsAwsjson11_serializeDocumentClusterSetting(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentCompatibilityList(v []types.Compatibility, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentContainerDefinition(v *types.ContainerDefinition, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Command != nil { + ok := object.Key("command") + if err := awsAwsjson11_serializeDocumentStringList(v.Command, ok); err != nil { + return err + } + } + + if v.Cpu != 0 { + ok := object.Key("cpu") + ok.Integer(v.Cpu) + } + + if v.CredentialSpecs != nil { + ok := object.Key("credentialSpecs") + if err := awsAwsjson11_serializeDocumentStringList(v.CredentialSpecs, ok); err != nil { + return err + } + } + + if v.DependsOn != nil { + ok := object.Key("dependsOn") + if err := awsAwsjson11_serializeDocumentContainerDependencies(v.DependsOn, ok); err != nil { + return err + } + } + + if v.DisableNetworking != nil { + ok := object.Key("disableNetworking") + ok.Boolean(*v.DisableNetworking) + } + + if v.DnsSearchDomains != nil { + ok := object.Key("dnsSearchDomains") + if err := awsAwsjson11_serializeDocumentStringList(v.DnsSearchDomains, ok); err != nil { + return err + } + } + + if v.DnsServers != nil { + ok := object.Key("dnsServers") + if err := awsAwsjson11_serializeDocumentStringList(v.DnsServers, ok); err != nil { + return err + } + } + + if v.DockerLabels != nil { + ok := object.Key("dockerLabels") + if err := awsAwsjson11_serializeDocumentDockerLabelsMap(v.DockerLabels, ok); err != nil { + return err + } + } + + if v.DockerSecurityOptions != nil { + ok := object.Key("dockerSecurityOptions") + if err := awsAwsjson11_serializeDocumentStringList(v.DockerSecurityOptions, ok); err != nil { + return err + } + } + + if v.EntryPoint != nil { + ok := object.Key("entryPoint") + if err := awsAwsjson11_serializeDocumentStringList(v.EntryPoint, ok); err != nil { + return err + } + } + + if v.Environment != nil { + ok := object.Key("environment") + if err := awsAwsjson11_serializeDocumentEnvironmentVariables(v.Environment, ok); err != nil { + return err + } + } + + if v.EnvironmentFiles != nil { + ok := object.Key("environmentFiles") + if err := awsAwsjson11_serializeDocumentEnvironmentFiles(v.EnvironmentFiles, ok); err != nil { + return err + } + } + + if v.Essential != nil { + ok := object.Key("essential") + ok.Boolean(*v.Essential) + } + + if v.ExtraHosts != nil { + ok := object.Key("extraHosts") + if err := awsAwsjson11_serializeDocumentHostEntryList(v.ExtraHosts, ok); err != nil { + return err + } + } + + if v.FirelensConfiguration != nil { + ok := object.Key("firelensConfiguration") + if err := awsAwsjson11_serializeDocumentFirelensConfiguration(v.FirelensConfiguration, ok); err != nil { + return err + } + } + + if v.HealthCheck != nil { + ok := object.Key("healthCheck") + if err := awsAwsjson11_serializeDocumentHealthCheck(v.HealthCheck, ok); err != nil { + return err + } + } + + if v.Hostname != nil { + ok := object.Key("hostname") + ok.String(*v.Hostname) + } + + if v.Image != nil { + ok := object.Key("image") + ok.String(*v.Image) + } + + if v.Interactive != nil { + ok := object.Key("interactive") + ok.Boolean(*v.Interactive) + } + + if v.Links != nil { + ok := object.Key("links") + if err := awsAwsjson11_serializeDocumentStringList(v.Links, ok); err != nil { + return 
err + } + } + + if v.LinuxParameters != nil { + ok := object.Key("linuxParameters") + if err := awsAwsjson11_serializeDocumentLinuxParameters(v.LinuxParameters, ok); err != nil { + return err + } + } + + if v.LogConfiguration != nil { + ok := object.Key("logConfiguration") + if err := awsAwsjson11_serializeDocumentLogConfiguration(v.LogConfiguration, ok); err != nil { + return err + } + } + + if v.Memory != nil { + ok := object.Key("memory") + ok.Integer(*v.Memory) + } + + if v.MemoryReservation != nil { + ok := object.Key("memoryReservation") + ok.Integer(*v.MemoryReservation) + } + + if v.MountPoints != nil { + ok := object.Key("mountPoints") + if err := awsAwsjson11_serializeDocumentMountPointList(v.MountPoints, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + if v.PortMappings != nil { + ok := object.Key("portMappings") + if err := awsAwsjson11_serializeDocumentPortMappingList(v.PortMappings, ok); err != nil { + return err + } + } + + if v.Privileged != nil { + ok := object.Key("privileged") + ok.Boolean(*v.Privileged) + } + + if v.PseudoTerminal != nil { + ok := object.Key("pseudoTerminal") + ok.Boolean(*v.PseudoTerminal) + } + + if v.ReadonlyRootFilesystem != nil { + ok := object.Key("readonlyRootFilesystem") + ok.Boolean(*v.ReadonlyRootFilesystem) + } + + if v.RepositoryCredentials != nil { + ok := object.Key("repositoryCredentials") + if err := awsAwsjson11_serializeDocumentRepositoryCredentials(v.RepositoryCredentials, ok); err != nil { + return err + } + } + + if v.ResourceRequirements != nil { + ok := object.Key("resourceRequirements") + if err := awsAwsjson11_serializeDocumentResourceRequirements(v.ResourceRequirements, ok); err != nil { + return err + } + } + + if v.RestartPolicy != nil { + ok := object.Key("restartPolicy") + if err := awsAwsjson11_serializeDocumentContainerRestartPolicy(v.RestartPolicy, ok); err != nil { + return err + } + } + + if v.Secrets != nil { + ok := object.Key("secrets") + if err := awsAwsjson11_serializeDocumentSecretList(v.Secrets, ok); err != nil { + return err + } + } + + if v.StartTimeout != nil { + ok := object.Key("startTimeout") + ok.Integer(*v.StartTimeout) + } + + if v.StopTimeout != nil { + ok := object.Key("stopTimeout") + ok.Integer(*v.StopTimeout) + } + + if v.SystemControls != nil { + ok := object.Key("systemControls") + if err := awsAwsjson11_serializeDocumentSystemControls(v.SystemControls, ok); err != nil { + return err + } + } + + if v.Ulimits != nil { + ok := object.Key("ulimits") + if err := awsAwsjson11_serializeDocumentUlimitList(v.Ulimits, ok); err != nil { + return err + } + } + + if v.User != nil { + ok := object.Key("user") + ok.String(*v.User) + } + + if v.VolumesFrom != nil { + ok := object.Key("volumesFrom") + if err := awsAwsjson11_serializeDocumentVolumeFromList(v.VolumesFrom, ok); err != nil { + return err + } + } + + if v.WorkingDirectory != nil { + ok := object.Key("workingDirectory") + ok.String(*v.WorkingDirectory) + } + + return nil +} + +func awsAwsjson11_serializeDocumentContainerDefinitions(v []types.ContainerDefinition, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentContainerDefinition(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentContainerDependencies(v []types.ContainerDependency, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i 
:= range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentContainerDependency(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentContainerDependency(v *types.ContainerDependency, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Condition) > 0 { + ok := object.Key("condition") + ok.String(string(v.Condition)) + } + + if v.ContainerName != nil { + ok := object.Key("containerName") + ok.String(*v.ContainerName) + } + + return nil +} + +func awsAwsjson11_serializeDocumentContainerInstanceFieldList(v []types.ContainerInstanceField, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentContainerOverride(v *types.ContainerOverride, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Command != nil { + ok := object.Key("command") + if err := awsAwsjson11_serializeDocumentStringList(v.Command, ok); err != nil { + return err + } + } + + if v.Cpu != nil { + ok := object.Key("cpu") + ok.Integer(*v.Cpu) + } + + if v.Environment != nil { + ok := object.Key("environment") + if err := awsAwsjson11_serializeDocumentEnvironmentVariables(v.Environment, ok); err != nil { + return err + } + } + + if v.EnvironmentFiles != nil { + ok := object.Key("environmentFiles") + if err := awsAwsjson11_serializeDocumentEnvironmentFiles(v.EnvironmentFiles, ok); err != nil { + return err + } + } + + if v.Memory != nil { + ok := object.Key("memory") + ok.Integer(*v.Memory) + } + + if v.MemoryReservation != nil { + ok := object.Key("memoryReservation") + ok.Integer(*v.MemoryReservation) + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + if v.ResourceRequirements != nil { + ok := object.Key("resourceRequirements") + if err := awsAwsjson11_serializeDocumentResourceRequirements(v.ResourceRequirements, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentContainerOverrides(v []types.ContainerOverride, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentContainerOverride(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentContainerRestartPolicy(v *types.ContainerRestartPolicy, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Enabled != nil { + ok := object.Key("enabled") + ok.Boolean(*v.Enabled) + } + + if v.IgnoredExitCodes != nil { + ok := object.Key("ignoredExitCodes") + if err := awsAwsjson11_serializeDocumentIntegerList(v.IgnoredExitCodes, ok); err != nil { + return err + } + } + + if v.RestartAttemptPeriod != nil { + ok := object.Key("restartAttemptPeriod") + ok.Integer(*v.RestartAttemptPeriod) + } + + return nil +} + +func awsAwsjson11_serializeDocumentContainerStateChange(v *types.ContainerStateChange, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContainerName != nil { + ok := object.Key("containerName") + ok.String(*v.ContainerName) + } + + if v.ExitCode != nil { + ok := object.Key("exitCode") + ok.Integer(*v.ExitCode) + } + + if v.ImageDigest != nil { + ok := object.Key("imageDigest") + ok.String(*v.ImageDigest) + } + + if v.NetworkBindings != nil { + ok := object.Key("networkBindings") + if err := 
awsAwsjson11_serializeDocumentNetworkBindings(v.NetworkBindings, ok); err != nil { + return err + } + } + + if v.Reason != nil { + ok := object.Key("reason") + ok.String(*v.Reason) + } + + if v.RuntimeId != nil { + ok := object.Key("runtimeId") + ok.String(*v.RuntimeId) + } + + if v.Status != nil { + ok := object.Key("status") + ok.String(*v.Status) + } + + return nil +} + +func awsAwsjson11_serializeDocumentContainerStateChanges(v []types.ContainerStateChange, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentContainerStateChange(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentDeploymentAlarms(v *types.DeploymentAlarms, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AlarmNames != nil { + ok := object.Key("alarmNames") + if err := awsAwsjson11_serializeDocumentStringList(v.AlarmNames, ok); err != nil { + return err + } + } + + { + ok := object.Key("enable") + ok.Boolean(v.Enable) + } + + { + ok := object.Key("rollback") + ok.Boolean(v.Rollback) + } + + return nil +} + +func awsAwsjson11_serializeDocumentDeploymentCircuitBreaker(v *types.DeploymentCircuitBreaker, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("enable") + ok.Boolean(v.Enable) + } + + { + ok := object.Key("rollback") + ok.Boolean(v.Rollback) + } + + return nil +} + +func awsAwsjson11_serializeDocumentDeploymentConfiguration(v *types.DeploymentConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Alarms != nil { + ok := object.Key("alarms") + if err := awsAwsjson11_serializeDocumentDeploymentAlarms(v.Alarms, ok); err != nil { + return err + } + } + + if v.DeploymentCircuitBreaker != nil { + ok := object.Key("deploymentCircuitBreaker") + if err := awsAwsjson11_serializeDocumentDeploymentCircuitBreaker(v.DeploymentCircuitBreaker, ok); err != nil { + return err + } + } + + if v.MaximumPercent != nil { + ok := object.Key("maximumPercent") + ok.Integer(*v.MaximumPercent) + } + + if v.MinimumHealthyPercent != nil { + ok := object.Key("minimumHealthyPercent") + ok.Integer(*v.MinimumHealthyPercent) + } + + return nil +} + +func awsAwsjson11_serializeDocumentDeploymentController(v *types.DeploymentController, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Type) > 0 { + ok := object.Key("type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentDevice(v *types.Device, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContainerPath != nil { + ok := object.Key("containerPath") + ok.String(*v.ContainerPath) + } + + if v.HostPath != nil { + ok := object.Key("hostPath") + ok.String(*v.HostPath) + } + + if v.Permissions != nil { + ok := object.Key("permissions") + if err := awsAwsjson11_serializeDocumentDeviceCgroupPermissions(v.Permissions, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentDeviceCgroupPermissions(v []types.DeviceCgroupPermission, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentDevicesList(v []types.Device, value smithyjson.Value) error { + array := value.Array() + defer 
array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentDevice(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentDockerLabelsMap(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + +func awsAwsjson11_serializeDocumentDockerVolumeConfiguration(v *types.DockerVolumeConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Autoprovision != nil { + ok := object.Key("autoprovision") + ok.Boolean(*v.Autoprovision) + } + + if v.Driver != nil { + ok := object.Key("driver") + ok.String(*v.Driver) + } + + if v.DriverOpts != nil { + ok := object.Key("driverOpts") + if err := awsAwsjson11_serializeDocumentStringMap(v.DriverOpts, ok); err != nil { + return err + } + } + + if v.Labels != nil { + ok := object.Key("labels") + if err := awsAwsjson11_serializeDocumentStringMap(v.Labels, ok); err != nil { + return err + } + } + + if len(v.Scope) > 0 { + ok := object.Key("scope") + ok.String(string(v.Scope)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentEBSTagSpecification(v *types.EBSTagSpecification, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.PropagateTags) > 0 { + ok := object.Key("propagateTags") + ok.String(string(v.PropagateTags)) + } + + if len(v.ResourceType) > 0 { + ok := object.Key("resourceType") + ok.String(string(v.ResourceType)) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentEBSTagSpecifications(v []types.EBSTagSpecification, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentEBSTagSpecification(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentEFSAuthorizationConfig(v *types.EFSAuthorizationConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AccessPointId != nil { + ok := object.Key("accessPointId") + ok.String(*v.AccessPointId) + } + + if len(v.Iam) > 0 { + ok := object.Key("iam") + ok.String(string(v.Iam)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentEFSVolumeConfiguration(v *types.EFSVolumeConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AuthorizationConfig != nil { + ok := object.Key("authorizationConfig") + if err := awsAwsjson11_serializeDocumentEFSAuthorizationConfig(v.AuthorizationConfig, ok); err != nil { + return err + } + } + + if v.FileSystemId != nil { + ok := object.Key("fileSystemId") + ok.String(*v.FileSystemId) + } + + if v.RootDirectory != nil { + ok := object.Key("rootDirectory") + ok.String(*v.RootDirectory) + } + + if len(v.TransitEncryption) > 0 { + ok := object.Key("transitEncryption") + ok.String(string(v.TransitEncryption)) + } + + if v.TransitEncryptionPort != nil { + ok := object.Key("transitEncryptionPort") + ok.Integer(*v.TransitEncryptionPort) + } + + return nil +} + +func awsAwsjson11_serializeDocumentEnvironmentFile(v *types.EnvironmentFile, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Type) > 0 { + ok := object.Key("type") + 
ok.String(string(v.Type)) + } + + if v.Value != nil { + ok := object.Key("value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeDocumentEnvironmentFiles(v []types.EnvironmentFile, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentEnvironmentFile(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentEnvironmentVariables(v []types.KeyValuePair, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentKeyValuePair(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentEphemeralStorage(v *types.EphemeralStorage, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("sizeInGiB") + ok.Integer(v.SizeInGiB) + } + + return nil +} + +func awsAwsjson11_serializeDocumentExecuteCommandConfiguration(v *types.ExecuteCommandConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KmsKeyId != nil { + ok := object.Key("kmsKeyId") + ok.String(*v.KmsKeyId) + } + + if v.LogConfiguration != nil { + ok := object.Key("logConfiguration") + if err := awsAwsjson11_serializeDocumentExecuteCommandLogConfiguration(v.LogConfiguration, ok); err != nil { + return err + } + } + + if len(v.Logging) > 0 { + ok := object.Key("logging") + ok.String(string(v.Logging)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentExecuteCommandLogConfiguration(v *types.ExecuteCommandLogConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CloudWatchEncryptionEnabled { + ok := object.Key("cloudWatchEncryptionEnabled") + ok.Boolean(v.CloudWatchEncryptionEnabled) + } + + if v.CloudWatchLogGroupName != nil { + ok := object.Key("cloudWatchLogGroupName") + ok.String(*v.CloudWatchLogGroupName) + } + + if v.S3BucketName != nil { + ok := object.Key("s3BucketName") + ok.String(*v.S3BucketName) + } + + if v.S3EncryptionEnabled { + ok := object.Key("s3EncryptionEnabled") + ok.Boolean(v.S3EncryptionEnabled) + } + + if v.S3KeyPrefix != nil { + ok := object.Key("s3KeyPrefix") + ok.String(*v.S3KeyPrefix) + } + + return nil +} + +func awsAwsjson11_serializeDocumentFirelensConfiguration(v *types.FirelensConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Options != nil { + ok := object.Key("options") + if err := awsAwsjson11_serializeDocumentFirelensConfigurationOptionsMap(v.Options, ok); err != nil { + return err + } + } + + if len(v.Type) > 0 { + ok := object.Key("type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentFirelensConfigurationOptionsMap(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + +func awsAwsjson11_serializeDocumentFSxWindowsFileServerAuthorizationConfig(v *types.FSxWindowsFileServerAuthorizationConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CredentialsParameter != nil { + ok := object.Key("credentialsParameter") + ok.String(*v.CredentialsParameter) + } + + if v.Domain != nil { + ok := object.Key("domain") + ok.String(*v.Domain) + } + + return nil +} + 
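// A minimal usage sketch, not part of the generated patch above: the document
// serializers in this file follow one presence convention throughout -- pointer
// fields are emitted only when non-nil, enum-typed fields only when len(...) > 0,
// and plain numeric fields only when non-zero -- and they all write into a
// smithyjson.Value obtained from smithyjson.NewEncoder(), exactly as the
// operation serializers earlier in the file do before setting the request body.
// The function and variable names below are illustrative assumptions only, and
// the sketch assumes it sits in the same ecs package as these unexported helpers.
func exampleSerializeHostEntry() ([]byte, error) {
	hostname, ip := "db.internal", "10.0.0.5"
	entry := types.HostEntry{Hostname: &hostname, IpAddress: &ip}

	// The serializer writes the object into the encoder's Value; Close is
	// handled inside the helper via its deferred object.Close().
	jsonEncoder := smithyjson.NewEncoder()
	if err := awsAwsjson11_serializeDocumentHostEntry(&entry, jsonEncoder.Value); err != nil {
		return nil, err
	}

	// jsonEncoder.Bytes() now holds roughly {"hostname":"db.internal","ipAddress":"10.0.0.5"},
	// which an operation serializer would hand to request.SetStream.
	return jsonEncoder.Bytes(), nil
}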
+func awsAwsjson11_serializeDocumentFSxWindowsFileServerVolumeConfiguration(v *types.FSxWindowsFileServerVolumeConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AuthorizationConfig != nil { + ok := object.Key("authorizationConfig") + if err := awsAwsjson11_serializeDocumentFSxWindowsFileServerAuthorizationConfig(v.AuthorizationConfig, ok); err != nil { + return err + } + } + + if v.FileSystemId != nil { + ok := object.Key("fileSystemId") + ok.String(*v.FileSystemId) + } + + if v.RootDirectory != nil { + ok := object.Key("rootDirectory") + ok.String(*v.RootDirectory) + } + + return nil +} + +func awsAwsjson11_serializeDocumentHealthCheck(v *types.HealthCheck, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Command != nil { + ok := object.Key("command") + if err := awsAwsjson11_serializeDocumentStringList(v.Command, ok); err != nil { + return err + } + } + + if v.Interval != nil { + ok := object.Key("interval") + ok.Integer(*v.Interval) + } + + if v.Retries != nil { + ok := object.Key("retries") + ok.Integer(*v.Retries) + } + + if v.StartPeriod != nil { + ok := object.Key("startPeriod") + ok.Integer(*v.StartPeriod) + } + + if v.Timeout != nil { + ok := object.Key("timeout") + ok.Integer(*v.Timeout) + } + + return nil +} + +func awsAwsjson11_serializeDocumentHostEntry(v *types.HostEntry, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Hostname != nil { + ok := object.Key("hostname") + ok.String(*v.Hostname) + } + + if v.IpAddress != nil { + ok := object.Key("ipAddress") + ok.String(*v.IpAddress) + } + + return nil +} + +func awsAwsjson11_serializeDocumentHostEntryList(v []types.HostEntry, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentHostEntry(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentHostVolumeProperties(v *types.HostVolumeProperties, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SourcePath != nil { + ok := object.Key("sourcePath") + ok.String(*v.SourcePath) + } + + return nil +} + +func awsAwsjson11_serializeDocumentInferenceAccelerator(v *types.InferenceAccelerator, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DeviceName != nil { + ok := object.Key("deviceName") + ok.String(*v.DeviceName) + } + + if v.DeviceType != nil { + ok := object.Key("deviceType") + ok.String(*v.DeviceType) + } + + return nil +} + +func awsAwsjson11_serializeDocumentInferenceAcceleratorOverride(v *types.InferenceAcceleratorOverride, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DeviceName != nil { + ok := object.Key("deviceName") + ok.String(*v.DeviceName) + } + + if v.DeviceType != nil { + ok := object.Key("deviceType") + ok.String(*v.DeviceType) + } + + return nil +} + +func awsAwsjson11_serializeDocumentInferenceAcceleratorOverrides(v []types.InferenceAcceleratorOverride, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentInferenceAcceleratorOverride(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentInferenceAccelerators(v []types.InferenceAccelerator, value smithyjson.Value) error { + array := 
value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentInferenceAccelerator(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentIntegerList(v []int32, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.Integer(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentKernelCapabilities(v *types.KernelCapabilities, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Add != nil { + ok := object.Key("add") + if err := awsAwsjson11_serializeDocumentStringList(v.Add, ok); err != nil { + return err + } + } + + if v.Drop != nil { + ok := object.Key("drop") + if err := awsAwsjson11_serializeDocumentStringList(v.Drop, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentKeyValuePair(v *types.KeyValuePair, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + if v.Value != nil { + ok := object.Key("value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeDocumentLinuxParameters(v *types.LinuxParameters, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Capabilities != nil { + ok := object.Key("capabilities") + if err := awsAwsjson11_serializeDocumentKernelCapabilities(v.Capabilities, ok); err != nil { + return err + } + } + + if v.Devices != nil { + ok := object.Key("devices") + if err := awsAwsjson11_serializeDocumentDevicesList(v.Devices, ok); err != nil { + return err + } + } + + if v.InitProcessEnabled != nil { + ok := object.Key("initProcessEnabled") + ok.Boolean(*v.InitProcessEnabled) + } + + if v.MaxSwap != nil { + ok := object.Key("maxSwap") + ok.Integer(*v.MaxSwap) + } + + if v.SharedMemorySize != nil { + ok := object.Key("sharedMemorySize") + ok.Integer(*v.SharedMemorySize) + } + + if v.Swappiness != nil { + ok := object.Key("swappiness") + ok.Integer(*v.Swappiness) + } + + if v.Tmpfs != nil { + ok := object.Key("tmpfs") + if err := awsAwsjson11_serializeDocumentTmpfsList(v.Tmpfs, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentLoadBalancer(v *types.LoadBalancer, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContainerName != nil { + ok := object.Key("containerName") + ok.String(*v.ContainerName) + } + + if v.ContainerPort != nil { + ok := object.Key("containerPort") + ok.Integer(*v.ContainerPort) + } + + if v.LoadBalancerName != nil { + ok := object.Key("loadBalancerName") + ok.String(*v.LoadBalancerName) + } + + if v.TargetGroupArn != nil { + ok := object.Key("targetGroupArn") + ok.String(*v.TargetGroupArn) + } + + return nil +} + +func awsAwsjson11_serializeDocumentLoadBalancers(v []types.LoadBalancer, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentLoadBalancer(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentLogConfiguration(v *types.LogConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.LogDriver) > 0 { + ok := object.Key("logDriver") + ok.String(string(v.LogDriver)) + } + + if v.Options != nil { + ok := object.Key("options") 
+ if err := awsAwsjson11_serializeDocumentLogConfigurationOptionsMap(v.Options, ok); err != nil { + return err + } + } + + if v.SecretOptions != nil { + ok := object.Key("secretOptions") + if err := awsAwsjson11_serializeDocumentSecretList(v.SecretOptions, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentLogConfigurationOptionsMap(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + +func awsAwsjson11_serializeDocumentManagedAgentStateChange(v *types.ManagedAgentStateChange, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContainerName != nil { + ok := object.Key("containerName") + ok.String(*v.ContainerName) + } + + if len(v.ManagedAgentName) > 0 { + ok := object.Key("managedAgentName") + ok.String(string(v.ManagedAgentName)) + } + + if v.Reason != nil { + ok := object.Key("reason") + ok.String(*v.Reason) + } + + if v.Status != nil { + ok := object.Key("status") + ok.String(*v.Status) + } + + return nil +} + +func awsAwsjson11_serializeDocumentManagedAgentStateChanges(v []types.ManagedAgentStateChange, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentManagedAgentStateChange(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentManagedScaling(v *types.ManagedScaling, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.InstanceWarmupPeriod != nil { + ok := object.Key("instanceWarmupPeriod") + ok.Integer(*v.InstanceWarmupPeriod) + } + + if v.MaximumScalingStepSize != nil { + ok := object.Key("maximumScalingStepSize") + ok.Integer(*v.MaximumScalingStepSize) + } + + if v.MinimumScalingStepSize != nil { + ok := object.Key("minimumScalingStepSize") + ok.Integer(*v.MinimumScalingStepSize) + } + + if len(v.Status) > 0 { + ok := object.Key("status") + ok.String(string(v.Status)) + } + + if v.TargetCapacity != nil { + ok := object.Key("targetCapacity") + ok.Integer(*v.TargetCapacity) + } + + return nil +} + +func awsAwsjson11_serializeDocumentManagedStorageConfiguration(v *types.ManagedStorageConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.FargateEphemeralStorageKmsKeyId != nil { + ok := object.Key("fargateEphemeralStorageKmsKeyId") + ok.String(*v.FargateEphemeralStorageKmsKeyId) + } + + if v.KmsKeyId != nil { + ok := object.Key("kmsKeyId") + ok.String(*v.KmsKeyId) + } + + return nil +} + +func awsAwsjson11_serializeDocumentMountPoint(v *types.MountPoint, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContainerPath != nil { + ok := object.Key("containerPath") + ok.String(*v.ContainerPath) + } + + if v.ReadOnly != nil { + ok := object.Key("readOnly") + ok.Boolean(*v.ReadOnly) + } + + if v.SourceVolume != nil { + ok := object.Key("sourceVolume") + ok.String(*v.SourceVolume) + } + + return nil +} + +func awsAwsjson11_serializeDocumentMountPointList(v []types.MountPoint, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentMountPoint(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentNetworkBinding(v 
*types.NetworkBinding, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BindIP != nil { + ok := object.Key("bindIP") + ok.String(*v.BindIP) + } + + if v.ContainerPort != nil { + ok := object.Key("containerPort") + ok.Integer(*v.ContainerPort) + } + + if v.ContainerPortRange != nil { + ok := object.Key("containerPortRange") + ok.String(*v.ContainerPortRange) + } + + if v.HostPort != nil { + ok := object.Key("hostPort") + ok.Integer(*v.HostPort) + } + + if v.HostPortRange != nil { + ok := object.Key("hostPortRange") + ok.String(*v.HostPortRange) + } + + if len(v.Protocol) > 0 { + ok := object.Key("protocol") + ok.String(string(v.Protocol)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentNetworkBindings(v []types.NetworkBinding, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentNetworkBinding(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentNetworkConfiguration(v *types.NetworkConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AwsvpcConfiguration != nil { + ok := object.Key("awsvpcConfiguration") + if err := awsAwsjson11_serializeDocumentAwsVpcConfiguration(v.AwsvpcConfiguration, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentPlacementConstraint(v *types.PlacementConstraint, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Expression != nil { + ok := object.Key("expression") + ok.String(*v.Expression) + } + + if len(v.Type) > 0 { + ok := object.Key("type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentPlacementConstraints(v []types.PlacementConstraint, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentPlacementConstraint(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentPlacementStrategies(v []types.PlacementStrategy, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentPlacementStrategy(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentPlacementStrategy(v *types.PlacementStrategy, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Field != nil { + ok := object.Key("field") + ok.String(*v.Field) + } + + if len(v.Type) > 0 { + ok := object.Key("type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentPlatformDevice(v *types.PlatformDevice, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Id != nil { + ok := object.Key("id") + ok.String(*v.Id) + } + + if len(v.Type) > 0 { + ok := object.Key("type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentPlatformDevices(v []types.PlatformDevice, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentPlatformDevice(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentPortMapping(v *types.PortMapping, value 
smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.AppProtocol) > 0 { + ok := object.Key("appProtocol") + ok.String(string(v.AppProtocol)) + } + + if v.ContainerPort != nil { + ok := object.Key("containerPort") + ok.Integer(*v.ContainerPort) + } + + if v.ContainerPortRange != nil { + ok := object.Key("containerPortRange") + ok.String(*v.ContainerPortRange) + } + + if v.HostPort != nil { + ok := object.Key("hostPort") + ok.Integer(*v.HostPort) + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + if len(v.Protocol) > 0 { + ok := object.Key("protocol") + ok.String(string(v.Protocol)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentPortMappingList(v []types.PortMapping, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentPortMapping(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentProxyConfiguration(v *types.ProxyConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContainerName != nil { + ok := object.Key("containerName") + ok.String(*v.ContainerName) + } + + if v.Properties != nil { + ok := object.Key("properties") + if err := awsAwsjson11_serializeDocumentProxyConfigurationProperties(v.Properties, ok); err != nil { + return err + } + } + + if len(v.Type) > 0 { + ok := object.Key("type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentProxyConfigurationProperties(v []types.KeyValuePair, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentKeyValuePair(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentRepositoryCredentials(v *types.RepositoryCredentials, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CredentialsParameter != nil { + ok := object.Key("credentialsParameter") + ok.String(*v.CredentialsParameter) + } + + return nil +} + +func awsAwsjson11_serializeDocumentResource(v *types.Resource, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DoubleValue != 0 { + ok := object.Key("doubleValue") + switch { + case math.IsNaN(v.DoubleValue): + ok.String("NaN") + + case math.IsInf(v.DoubleValue, 1): + ok.String("Infinity") + + case math.IsInf(v.DoubleValue, -1): + ok.String("-Infinity") + + default: + ok.Double(v.DoubleValue) + + } + } + + if v.IntegerValue != 0 { + ok := object.Key("integerValue") + ok.Integer(v.IntegerValue) + } + + if v.LongValue != 0 { + ok := object.Key("longValue") + ok.Long(v.LongValue) + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + if v.StringSetValue != nil { + ok := object.Key("stringSetValue") + if err := awsAwsjson11_serializeDocumentStringList(v.StringSetValue, ok); err != nil { + return err + } + } + + if v.Type != nil { + ok := object.Key("type") + ok.String(*v.Type) + } + + return nil +} + +func awsAwsjson11_serializeDocumentResourceRequirement(v *types.ResourceRequirement, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Type) > 0 { + ok := object.Key("type") + ok.String(string(v.Type)) + } + + if v.Value != nil { + ok := object.Key("value") + ok.String(*v.Value) + } + + return nil +} + +func 
awsAwsjson11_serializeDocumentResourceRequirements(v []types.ResourceRequirement, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentResourceRequirement(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentResources(v []types.Resource, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentResource(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentRuntimePlatform(v *types.RuntimePlatform, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.CpuArchitecture) > 0 { + ok := object.Key("cpuArchitecture") + ok.String(string(v.CpuArchitecture)) + } + + if len(v.OperatingSystemFamily) > 0 { + ok := object.Key("operatingSystemFamily") + ok.String(string(v.OperatingSystemFamily)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentScale(v *types.Scale, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Unit) > 0 { + ok := object.Key("unit") + ok.String(string(v.Unit)) + } + + if v.Value != 0 { + ok := object.Key("value") + switch { + case math.IsNaN(v.Value): + ok.String("NaN") + + case math.IsInf(v.Value, 1): + ok.String("Infinity") + + case math.IsInf(v.Value, -1): + ok.String("-Infinity") + + default: + ok.Double(v.Value) + + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentSecret(v *types.Secret, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + if v.ValueFrom != nil { + ok := object.Key("valueFrom") + ok.String(*v.ValueFrom) + } + + return nil +} + +func awsAwsjson11_serializeDocumentSecretList(v []types.Secret, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentSecret(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentServiceConnectClientAlias(v *types.ServiceConnectClientAlias, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DnsName != nil { + ok := object.Key("dnsName") + ok.String(*v.DnsName) + } + + if v.Port != nil { + ok := object.Key("port") + ok.Integer(*v.Port) + } + + return nil +} + +func awsAwsjson11_serializeDocumentServiceConnectClientAliasList(v []types.ServiceConnectClientAlias, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentServiceConnectClientAlias(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentServiceConnectConfiguration(v *types.ServiceConnectConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("enabled") + ok.Boolean(v.Enabled) + } + + if v.LogConfiguration != nil { + ok := object.Key("logConfiguration") + if err := awsAwsjson11_serializeDocumentLogConfiguration(v.LogConfiguration, ok); err != nil { + return err + } + } + + if v.Namespace != nil { + ok := object.Key("namespace") + ok.String(*v.Namespace) + } + + if v.Services != nil { + ok := object.Key("services") + if err := 
awsAwsjson11_serializeDocumentServiceConnectServiceList(v.Services, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentServiceConnectService(v *types.ServiceConnectService, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientAliases != nil { + ok := object.Key("clientAliases") + if err := awsAwsjson11_serializeDocumentServiceConnectClientAliasList(v.ClientAliases, ok); err != nil { + return err + } + } + + if v.DiscoveryName != nil { + ok := object.Key("discoveryName") + ok.String(*v.DiscoveryName) + } + + if v.IngressPortOverride != nil { + ok := object.Key("ingressPortOverride") + ok.Integer(*v.IngressPortOverride) + } + + if v.PortName != nil { + ok := object.Key("portName") + ok.String(*v.PortName) + } + + if v.Timeout != nil { + ok := object.Key("timeout") + if err := awsAwsjson11_serializeDocumentTimeoutConfiguration(v.Timeout, ok); err != nil { + return err + } + } + + if v.Tls != nil { + ok := object.Key("tls") + if err := awsAwsjson11_serializeDocumentServiceConnectTlsConfiguration(v.Tls, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentServiceConnectServiceList(v []types.ServiceConnectService, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentServiceConnectService(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentServiceConnectTlsCertificateAuthority(v *types.ServiceConnectTlsCertificateAuthority, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AwsPcaAuthorityArn != nil { + ok := object.Key("awsPcaAuthorityArn") + ok.String(*v.AwsPcaAuthorityArn) + } + + return nil +} + +func awsAwsjson11_serializeDocumentServiceConnectTlsConfiguration(v *types.ServiceConnectTlsConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IssuerCertificateAuthority != nil { + ok := object.Key("issuerCertificateAuthority") + if err := awsAwsjson11_serializeDocumentServiceConnectTlsCertificateAuthority(v.IssuerCertificateAuthority, ok); err != nil { + return err + } + } + + if v.KmsKey != nil { + ok := object.Key("kmsKey") + ok.String(*v.KmsKey) + } + + if v.RoleArn != nil { + ok := object.Key("roleArn") + ok.String(*v.RoleArn) + } + + return nil +} + +func awsAwsjson11_serializeDocumentServiceFieldList(v []types.ServiceField, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentServiceManagedEBSVolumeConfiguration(v *types.ServiceManagedEBSVolumeConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Encrypted != nil { + ok := object.Key("encrypted") + ok.Boolean(*v.Encrypted) + } + + if len(v.FilesystemType) > 0 { + ok := object.Key("filesystemType") + ok.String(string(v.FilesystemType)) + } + + if v.Iops != nil { + ok := object.Key("iops") + ok.Integer(*v.Iops) + } + + if v.KmsKeyId != nil { + ok := object.Key("kmsKeyId") + ok.String(*v.KmsKeyId) + } + + if v.RoleArn != nil { + ok := object.Key("roleArn") + ok.String(*v.RoleArn) + } + + if v.SizeInGiB != nil { + ok := object.Key("sizeInGiB") + ok.Integer(*v.SizeInGiB) + } + + if v.SnapshotId != nil { + ok := object.Key("snapshotId") + ok.String(*v.SnapshotId) + } 
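	// Note: presence here is signalled by pointer nil-ness -- optional numeric
	// fields such as iops, sizeInGiB, and throughput are emitted only when the
	// caller set them, which also lets an explicit zero value be serialized.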
+ + if v.TagSpecifications != nil { + ok := object.Key("tagSpecifications") + if err := awsAwsjson11_serializeDocumentEBSTagSpecifications(v.TagSpecifications, ok); err != nil { + return err + } + } + + if v.Throughput != nil { + ok := object.Key("throughput") + ok.Integer(*v.Throughput) + } + + if v.VolumeType != nil { + ok := object.Key("volumeType") + ok.String(*v.VolumeType) + } + + return nil +} + +func awsAwsjson11_serializeDocumentServiceRegistries(v []types.ServiceRegistry, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentServiceRegistry(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentServiceRegistry(v *types.ServiceRegistry, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContainerName != nil { + ok := object.Key("containerName") + ok.String(*v.ContainerName) + } + + if v.ContainerPort != nil { + ok := object.Key("containerPort") + ok.Integer(*v.ContainerPort) + } + + if v.Port != nil { + ok := object.Key("port") + ok.Integer(*v.Port) + } + + if v.RegistryArn != nil { + ok := object.Key("registryArn") + ok.String(*v.RegistryArn) + } + + return nil +} + +func awsAwsjson11_serializeDocumentServiceVolumeConfiguration(v *types.ServiceVolumeConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ManagedEBSVolume != nil { + ok := object.Key("managedEBSVolume") + if err := awsAwsjson11_serializeDocumentServiceManagedEBSVolumeConfiguration(v.ManagedEBSVolume, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + return nil +} + +func awsAwsjson11_serializeDocumentServiceVolumeConfigurations(v []types.ServiceVolumeConfiguration, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentServiceVolumeConfiguration(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentStringList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentStringMap(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + +func awsAwsjson11_serializeDocumentSystemControl(v *types.SystemControl, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Namespace != nil { + ok := object.Key("namespace") + ok.String(*v.Namespace) + } + + if v.Value != nil { + ok := object.Key("value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeDocumentSystemControls(v []types.SystemControl, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentSystemControl(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Key != nil { + ok := object.Key("key") + ok.String(*v.Key) + } + + if v.Value != nil { + ok := object.Key("value") + ok.String(*v.Value) + } + + 
return nil +} + +func awsAwsjson11_serializeDocumentTagKeys(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentTags(v []types.Tag, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentTaskDefinitionFieldList(v []types.TaskDefinitionField, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentTaskDefinitionPlacementConstraint(v *types.TaskDefinitionPlacementConstraint, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Expression != nil { + ok := object.Key("expression") + ok.String(*v.Expression) + } + + if len(v.Type) > 0 { + ok := object.Key("type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTaskDefinitionPlacementConstraints(v []types.TaskDefinitionPlacementConstraint, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentTaskDefinitionPlacementConstraint(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentTaskFieldList(v []types.TaskField, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentTaskManagedEBSVolumeConfiguration(v *types.TaskManagedEBSVolumeConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Encrypted != nil { + ok := object.Key("encrypted") + ok.Boolean(*v.Encrypted) + } + + if len(v.FilesystemType) > 0 { + ok := object.Key("filesystemType") + ok.String(string(v.FilesystemType)) + } + + if v.Iops != nil { + ok := object.Key("iops") + ok.Integer(*v.Iops) + } + + if v.KmsKeyId != nil { + ok := object.Key("kmsKeyId") + ok.String(*v.KmsKeyId) + } + + if v.RoleArn != nil { + ok := object.Key("roleArn") + ok.String(*v.RoleArn) + } + + if v.SizeInGiB != nil { + ok := object.Key("sizeInGiB") + ok.Integer(*v.SizeInGiB) + } + + if v.SnapshotId != nil { + ok := object.Key("snapshotId") + ok.String(*v.SnapshotId) + } + + if v.TagSpecifications != nil { + ok := object.Key("tagSpecifications") + if err := awsAwsjson11_serializeDocumentEBSTagSpecifications(v.TagSpecifications, ok); err != nil { + return err + } + } + + if v.TerminationPolicy != nil { + ok := object.Key("terminationPolicy") + if err := awsAwsjson11_serializeDocumentTaskManagedEBSVolumeTerminationPolicy(v.TerminationPolicy, ok); err != nil { + return err + } + } + + if v.Throughput != nil { + ok := object.Key("throughput") + ok.Integer(*v.Throughput) + } + + if v.VolumeType != nil { + ok := object.Key("volumeType") + ok.String(*v.VolumeType) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTaskManagedEBSVolumeTerminationPolicy(v *types.TaskManagedEBSVolumeTerminationPolicy, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DeleteOnTermination != nil { + ok := object.Key("deleteOnTermination") + 
ok.Boolean(*v.DeleteOnTermination) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTaskOverride(v *types.TaskOverride, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContainerOverrides != nil { + ok := object.Key("containerOverrides") + if err := awsAwsjson11_serializeDocumentContainerOverrides(v.ContainerOverrides, ok); err != nil { + return err + } + } + + if v.Cpu != nil { + ok := object.Key("cpu") + ok.String(*v.Cpu) + } + + if v.EphemeralStorage != nil { + ok := object.Key("ephemeralStorage") + if err := awsAwsjson11_serializeDocumentEphemeralStorage(v.EphemeralStorage, ok); err != nil { + return err + } + } + + if v.ExecutionRoleArn != nil { + ok := object.Key("executionRoleArn") + ok.String(*v.ExecutionRoleArn) + } + + if v.InferenceAcceleratorOverrides != nil { + ok := object.Key("inferenceAcceleratorOverrides") + if err := awsAwsjson11_serializeDocumentInferenceAcceleratorOverrides(v.InferenceAcceleratorOverrides, ok); err != nil { + return err + } + } + + if v.Memory != nil { + ok := object.Key("memory") + ok.String(*v.Memory) + } + + if v.TaskRoleArn != nil { + ok := object.Key("taskRoleArn") + ok.String(*v.TaskRoleArn) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTaskSetFieldList(v []types.TaskSetField, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentTaskVolumeConfiguration(v *types.TaskVolumeConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ManagedEBSVolume != nil { + ok := object.Key("managedEBSVolume") + if err := awsAwsjson11_serializeDocumentTaskManagedEBSVolumeConfiguration(v.ManagedEBSVolume, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTaskVolumeConfigurations(v []types.TaskVolumeConfiguration, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentTaskVolumeConfiguration(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentTimeoutConfiguration(v *types.TimeoutConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IdleTimeoutSeconds != nil { + ok := object.Key("idleTimeoutSeconds") + ok.Integer(*v.IdleTimeoutSeconds) + } + + if v.PerRequestTimeoutSeconds != nil { + ok := object.Key("perRequestTimeoutSeconds") + ok.Integer(*v.PerRequestTimeoutSeconds) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTmpfs(v *types.Tmpfs, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContainerPath != nil { + ok := object.Key("containerPath") + ok.String(*v.ContainerPath) + } + + if v.MountOptions != nil { + ok := object.Key("mountOptions") + if err := awsAwsjson11_serializeDocumentStringList(v.MountOptions, ok); err != nil { + return err + } + } + + { + ok := object.Key("size") + ok.Integer(v.Size) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTmpfsList(v []types.Tmpfs, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentTmpfs(&v[i], av); err != nil { + return err + } + } + return nil +} + +func 
awsAwsjson11_serializeDocumentUlimit(v *types.Ulimit, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("hardLimit") + ok.Integer(v.HardLimit) + } + + if len(v.Name) > 0 { + ok := object.Key("name") + ok.String(string(v.Name)) + } + + { + ok := object.Key("softLimit") + ok.Integer(v.SoftLimit) + } + + return nil +} + +func awsAwsjson11_serializeDocumentUlimitList(v []types.Ulimit, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentUlimit(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentVersionInfo(v *types.VersionInfo, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AgentHash != nil { + ok := object.Key("agentHash") + ok.String(*v.AgentHash) + } + + if v.AgentVersion != nil { + ok := object.Key("agentVersion") + ok.String(*v.AgentVersion) + } + + if v.DockerVersion != nil { + ok := object.Key("dockerVersion") + ok.String(*v.DockerVersion) + } + + return nil +} + +func awsAwsjson11_serializeDocumentVolume(v *types.Volume, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConfiguredAtLaunch != nil { + ok := object.Key("configuredAtLaunch") + ok.Boolean(*v.ConfiguredAtLaunch) + } + + if v.DockerVolumeConfiguration != nil { + ok := object.Key("dockerVolumeConfiguration") + if err := awsAwsjson11_serializeDocumentDockerVolumeConfiguration(v.DockerVolumeConfiguration, ok); err != nil { + return err + } + } + + if v.EfsVolumeConfiguration != nil { + ok := object.Key("efsVolumeConfiguration") + if err := awsAwsjson11_serializeDocumentEFSVolumeConfiguration(v.EfsVolumeConfiguration, ok); err != nil { + return err + } + } + + if v.FsxWindowsFileServerVolumeConfiguration != nil { + ok := object.Key("fsxWindowsFileServerVolumeConfiguration") + if err := awsAwsjson11_serializeDocumentFSxWindowsFileServerVolumeConfiguration(v.FsxWindowsFileServerVolumeConfiguration, ok); err != nil { + return err + } + } + + if v.Host != nil { + ok := object.Key("host") + if err := awsAwsjson11_serializeDocumentHostVolumeProperties(v.Host, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + return nil +} + +func awsAwsjson11_serializeDocumentVolumeFrom(v *types.VolumeFrom, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ReadOnly != nil { + ok := object.Key("readOnly") + ok.Boolean(*v.ReadOnly) + } + + if v.SourceContainer != nil { + ok := object.Key("sourceContainer") + ok.String(*v.SourceContainer) + } + + return nil +} + +func awsAwsjson11_serializeDocumentVolumeFromList(v []types.VolumeFrom, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentVolumeFrom(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentVolumeList(v []types.Volume, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentVolume(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateCapacityProviderInput(v *CreateCapacityProviderInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if 
v.AutoScalingGroupProvider != nil { + ok := object.Key("autoScalingGroupProvider") + if err := awsAwsjson11_serializeDocumentAutoScalingGroupProvider(v.AutoScalingGroupProvider, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateClusterInput(v *CreateClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CapacityProviders != nil { + ok := object.Key("capacityProviders") + if err := awsAwsjson11_serializeDocumentStringList(v.CapacityProviders, ok); err != nil { + return err + } + } + + if v.ClusterName != nil { + ok := object.Key("clusterName") + ok.String(*v.ClusterName) + } + + if v.Configuration != nil { + ok := object.Key("configuration") + if err := awsAwsjson11_serializeDocumentClusterConfiguration(v.Configuration, ok); err != nil { + return err + } + } + + if v.DefaultCapacityProviderStrategy != nil { + ok := object.Key("defaultCapacityProviderStrategy") + if err := awsAwsjson11_serializeDocumentCapacityProviderStrategy(v.DefaultCapacityProviderStrategy, ok); err != nil { + return err + } + } + + if v.ServiceConnectDefaults != nil { + ok := object.Key("serviceConnectDefaults") + if err := awsAwsjson11_serializeDocumentClusterServiceConnectDefaultsRequest(v.ServiceConnectDefaults, ok); err != nil { + return err + } + } + + if v.Settings != nil { + ok := object.Key("settings") + if err := awsAwsjson11_serializeDocumentClusterSettings(v.Settings, ok); err != nil { + return err + } + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateServiceInput(v *CreateServiceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CapacityProviderStrategy != nil { + ok := object.Key("capacityProviderStrategy") + if err := awsAwsjson11_serializeDocumentCapacityProviderStrategy(v.CapacityProviderStrategy, ok); err != nil { + return err + } + } + + if v.ClientToken != nil { + ok := object.Key("clientToken") + ok.String(*v.ClientToken) + } + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.DeploymentConfiguration != nil { + ok := object.Key("deploymentConfiguration") + if err := awsAwsjson11_serializeDocumentDeploymentConfiguration(v.DeploymentConfiguration, ok); err != nil { + return err + } + } + + if v.DeploymentController != nil { + ok := object.Key("deploymentController") + if err := awsAwsjson11_serializeDocumentDeploymentController(v.DeploymentController, ok); err != nil { + return err + } + } + + if v.DesiredCount != nil { + ok := object.Key("desiredCount") + ok.Integer(*v.DesiredCount) + } + + if v.EnableECSManagedTags { + ok := object.Key("enableECSManagedTags") + ok.Boolean(v.EnableECSManagedTags) + } + + if v.EnableExecuteCommand { + ok := object.Key("enableExecuteCommand") + ok.Boolean(v.EnableExecuteCommand) + } + + if v.HealthCheckGracePeriodSeconds != nil { + ok := object.Key("healthCheckGracePeriodSeconds") + ok.Integer(*v.HealthCheckGracePeriodSeconds) + } + + if len(v.LaunchType) > 0 { + ok := object.Key("launchType") + ok.String(string(v.LaunchType)) + } + + if v.LoadBalancers != nil { + ok := object.Key("loadBalancers") + if err := 
awsAwsjson11_serializeDocumentLoadBalancers(v.LoadBalancers, ok); err != nil { + return err + } + } + + if v.NetworkConfiguration != nil { + ok := object.Key("networkConfiguration") + if err := awsAwsjson11_serializeDocumentNetworkConfiguration(v.NetworkConfiguration, ok); err != nil { + return err + } + } + + if v.PlacementConstraints != nil { + ok := object.Key("placementConstraints") + if err := awsAwsjson11_serializeDocumentPlacementConstraints(v.PlacementConstraints, ok); err != nil { + return err + } + } + + if v.PlacementStrategy != nil { + ok := object.Key("placementStrategy") + if err := awsAwsjson11_serializeDocumentPlacementStrategies(v.PlacementStrategy, ok); err != nil { + return err + } + } + + if v.PlatformVersion != nil { + ok := object.Key("platformVersion") + ok.String(*v.PlatformVersion) + } + + if len(v.PropagateTags) > 0 { + ok := object.Key("propagateTags") + ok.String(string(v.PropagateTags)) + } + + if v.Role != nil { + ok := object.Key("role") + ok.String(*v.Role) + } + + if len(v.SchedulingStrategy) > 0 { + ok := object.Key("schedulingStrategy") + ok.String(string(v.SchedulingStrategy)) + } + + if v.ServiceConnectConfiguration != nil { + ok := object.Key("serviceConnectConfiguration") + if err := awsAwsjson11_serializeDocumentServiceConnectConfiguration(v.ServiceConnectConfiguration, ok); err != nil { + return err + } + } + + if v.ServiceName != nil { + ok := object.Key("serviceName") + ok.String(*v.ServiceName) + } + + if v.ServiceRegistries != nil { + ok := object.Key("serviceRegistries") + if err := awsAwsjson11_serializeDocumentServiceRegistries(v.ServiceRegistries, ok); err != nil { + return err + } + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { + return err + } + } + + if v.TaskDefinition != nil { + ok := object.Key("taskDefinition") + ok.String(*v.TaskDefinition) + } + + if v.VolumeConfigurations != nil { + ok := object.Key("volumeConfigurations") + if err := awsAwsjson11_serializeDocumentServiceVolumeConfigurations(v.VolumeConfigurations, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateTaskSetInput(v *CreateTaskSetInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CapacityProviderStrategy != nil { + ok := object.Key("capacityProviderStrategy") + if err := awsAwsjson11_serializeDocumentCapacityProviderStrategy(v.CapacityProviderStrategy, ok); err != nil { + return err + } + } + + if v.ClientToken != nil { + ok := object.Key("clientToken") + ok.String(*v.ClientToken) + } + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.ExternalId != nil { + ok := object.Key("externalId") + ok.String(*v.ExternalId) + } + + if len(v.LaunchType) > 0 { + ok := object.Key("launchType") + ok.String(string(v.LaunchType)) + } + + if v.LoadBalancers != nil { + ok := object.Key("loadBalancers") + if err := awsAwsjson11_serializeDocumentLoadBalancers(v.LoadBalancers, ok); err != nil { + return err + } + } + + if v.NetworkConfiguration != nil { + ok := object.Key("networkConfiguration") + if err := awsAwsjson11_serializeDocumentNetworkConfiguration(v.NetworkConfiguration, ok); err != nil { + return err + } + } + + if v.PlatformVersion != nil { + ok := object.Key("platformVersion") + ok.String(*v.PlatformVersion) + } + + if v.Scale != nil { + ok := object.Key("scale") + if err := awsAwsjson11_serializeDocumentScale(v.Scale, ok); err != nil { + return err + } + } + + 
if v.Service != nil { + ok := object.Key("service") + ok.String(*v.Service) + } + + if v.ServiceRegistries != nil { + ok := object.Key("serviceRegistries") + if err := awsAwsjson11_serializeDocumentServiceRegistries(v.ServiceRegistries, ok); err != nil { + return err + } + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { + return err + } + } + + if v.TaskDefinition != nil { + ok := object.Key("taskDefinition") + ok.String(*v.TaskDefinition) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteAccountSettingInput(v *DeleteAccountSettingInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Name) > 0 { + ok := object.Key("name") + ok.String(string(v.Name)) + } + + if v.PrincipalArn != nil { + ok := object.Key("principalArn") + ok.String(*v.PrincipalArn) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteAttributesInput(v *DeleteAttributesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Attributes != nil { + ok := object.Key("attributes") + if err := awsAwsjson11_serializeDocumentAttributes(v.Attributes, ok); err != nil { + return err + } + } + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteCapacityProviderInput(v *DeleteCapacityProviderInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CapacityProvider != nil { + ok := object.Key("capacityProvider") + ok.String(*v.CapacityProvider) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteClusterInput(v *DeleteClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteServiceInput(v *DeleteServiceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Force != nil { + ok := object.Key("force") + ok.Boolean(*v.Force) + } + + if v.Service != nil { + ok := object.Key("service") + ok.String(*v.Service) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteTaskDefinitionsInput(v *DeleteTaskDefinitionsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TaskDefinitions != nil { + ok := object.Key("taskDefinitions") + if err := awsAwsjson11_serializeDocumentStringList(v.TaskDefinitions, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteTaskSetInput(v *DeleteTaskSetInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Force != nil { + ok := object.Key("force") + ok.Boolean(*v.Force) + } + + if v.Service != nil { + ok := object.Key("service") + ok.String(*v.Service) + } + + if v.TaskSet != nil { + ok := object.Key("taskSet") + ok.String(*v.TaskSet) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeregisterContainerInstanceInput(v *DeregisterContainerInstanceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if 
v.ContainerInstance != nil { + ok := object.Key("containerInstance") + ok.String(*v.ContainerInstance) + } + + if v.Force != nil { + ok := object.Key("force") + ok.Boolean(*v.Force) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeregisterTaskDefinitionInput(v *DeregisterTaskDefinitionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TaskDefinition != nil { + ok := object.Key("taskDefinition") + ok.String(*v.TaskDefinition) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeCapacityProvidersInput(v *DescribeCapacityProvidersInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CapacityProviders != nil { + ok := object.Key("capacityProviders") + if err := awsAwsjson11_serializeDocumentStringList(v.CapacityProviders, ok); err != nil { + return err + } + } + + if v.Include != nil { + ok := object.Key("include") + if err := awsAwsjson11_serializeDocumentCapacityProviderFieldList(v.Include, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeClustersInput(v *DescribeClustersInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Clusters != nil { + ok := object.Key("clusters") + if err := awsAwsjson11_serializeDocumentStringList(v.Clusters, ok); err != nil { + return err + } + } + + if v.Include != nil { + ok := object.Key("include") + if err := awsAwsjson11_serializeDocumentClusterFieldList(v.Include, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeContainerInstancesInput(v *DescribeContainerInstancesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.ContainerInstances != nil { + ok := object.Key("containerInstances") + if err := awsAwsjson11_serializeDocumentStringList(v.ContainerInstances, ok); err != nil { + return err + } + } + + if v.Include != nil { + ok := object.Key("include") + if err := awsAwsjson11_serializeDocumentContainerInstanceFieldList(v.Include, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeServicesInput(v *DescribeServicesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Include != nil { + ok := object.Key("include") + if err := awsAwsjson11_serializeDocumentServiceFieldList(v.Include, ok); err != nil { + return err + } + } + + if v.Services != nil { + ok := object.Key("services") + if err := awsAwsjson11_serializeDocumentStringList(v.Services, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeTaskDefinitionInput(v *DescribeTaskDefinitionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Include != nil { + ok := object.Key("include") + if err := awsAwsjson11_serializeDocumentTaskDefinitionFieldList(v.Include, ok); err != nil { + return err + } + } + + if v.TaskDefinition != nil { + ok := object.Key("taskDefinition") + ok.String(*v.TaskDefinition) + } + + return nil +} + +func 
awsAwsjson11_serializeOpDocumentDescribeTaskSetsInput(v *DescribeTaskSetsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Include != nil { + ok := object.Key("include") + if err := awsAwsjson11_serializeDocumentTaskSetFieldList(v.Include, ok); err != nil { + return err + } + } + + if v.Service != nil { + ok := object.Key("service") + ok.String(*v.Service) + } + + if v.TaskSets != nil { + ok := object.Key("taskSets") + if err := awsAwsjson11_serializeDocumentStringList(v.TaskSets, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeTasksInput(v *DescribeTasksInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Include != nil { + ok := object.Key("include") + if err := awsAwsjson11_serializeDocumentTaskFieldList(v.Include, ok); err != nil { + return err + } + } + + if v.Tasks != nil { + ok := object.Key("tasks") + if err := awsAwsjson11_serializeDocumentStringList(v.Tasks, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDiscoverPollEndpointInput(v *DiscoverPollEndpointInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.ContainerInstance != nil { + ok := object.Key("containerInstance") + ok.String(*v.ContainerInstance) + } + + if v.ZoneId != nil { + ok := object.Key("zoneId") + ok.String(*v.ZoneId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentExecuteCommandInput(v *ExecuteCommandInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Command != nil { + ok := object.Key("command") + ok.String(*v.Command) + } + + if v.Container != nil { + ok := object.Key("container") + ok.String(*v.Container) + } + + { + ok := object.Key("interactive") + ok.Boolean(v.Interactive) + } + + if v.Task != nil { + ok := object.Key("task") + ok.String(*v.Task) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetTaskProtectionInput(v *GetTaskProtectionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Tasks != nil { + ok := object.Key("tasks") + if err := awsAwsjson11_serializeDocumentStringList(v.Tasks, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListAccountSettingsInput(v *ListAccountSettingsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EffectiveSettings { + ok := object.Key("effectiveSettings") + ok.Boolean(v.EffectiveSettings) + } + + if v.MaxResults != 0 { + ok := object.Key("maxResults") + ok.Integer(v.MaxResults) + } + + if len(v.Name) > 0 { + ok := object.Key("name") + ok.String(string(v.Name)) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.PrincipalArn != nil { + ok := object.Key("principalArn") + ok.String(*v.PrincipalArn) + } + + if v.Value != nil { + ok := object.Key("value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListAttributesInput(v 
*ListAttributesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributeName != nil { + ok := object.Key("attributeName") + ok.String(*v.AttributeName) + } + + if v.AttributeValue != nil { + ok := object.Key("attributeValue") + ok.String(*v.AttributeValue) + } + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if len(v.TargetType) > 0 { + ok := object.Key("targetType") + ok.String(string(v.TargetType)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListClustersInput(v *ListClustersInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListContainerInstancesInput(v *ListContainerInstancesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Filter != nil { + ok := object.Key("filter") + ok.String(*v.Filter) + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if len(v.Status) > 0 { + ok := object.Key("status") + ok.String(string(v.Status)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListServicesByNamespaceInput(v *ListServicesByNamespaceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.Namespace != nil { + ok := object.Key("namespace") + ok.String(*v.Namespace) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListServicesInput(v *ListServicesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if len(v.LaunchType) > 0 { + ok := object.Key("launchType") + ok.String(string(v.LaunchType)) + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if len(v.SchedulingStrategy) > 0 { + ok := object.Key("schedulingStrategy") + ok.String(string(v.SchedulingStrategy)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListTagsForResourceInput(v *ListTagsForResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("resourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListTaskDefinitionFamiliesInput(v *ListTaskDefinitionFamiliesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.FamilyPrefix != nil { + ok := object.Key("familyPrefix") + ok.String(*v.FamilyPrefix) + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { 
+ ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if len(v.Status) > 0 { + ok := object.Key("status") + ok.String(string(v.Status)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListTaskDefinitionsInput(v *ListTaskDefinitionsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.FamilyPrefix != nil { + ok := object.Key("familyPrefix") + ok.String(*v.FamilyPrefix) + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if len(v.Sort) > 0 { + ok := object.Key("sort") + ok.String(string(v.Sort)) + } + + if len(v.Status) > 0 { + ok := object.Key("status") + ok.String(string(v.Status)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListTasksInput(v *ListTasksInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.ContainerInstance != nil { + ok := object.Key("containerInstance") + ok.String(*v.ContainerInstance) + } + + if len(v.DesiredStatus) > 0 { + ok := object.Key("desiredStatus") + ok.String(string(v.DesiredStatus)) + } + + if v.Family != nil { + ok := object.Key("family") + ok.String(*v.Family) + } + + if len(v.LaunchType) > 0 { + ok := object.Key("launchType") + ok.String(string(v.LaunchType)) + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.ServiceName != nil { + ok := object.Key("serviceName") + ok.String(*v.ServiceName) + } + + if v.StartedBy != nil { + ok := object.Key("startedBy") + ok.String(*v.StartedBy) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutAccountSettingDefaultInput(v *PutAccountSettingDefaultInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Name) > 0 { + ok := object.Key("name") + ok.String(string(v.Name)) + } + + if v.Value != nil { + ok := object.Key("value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutAccountSettingInput(v *PutAccountSettingInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Name) > 0 { + ok := object.Key("name") + ok.String(string(v.Name)) + } + + if v.PrincipalArn != nil { + ok := object.Key("principalArn") + ok.String(*v.PrincipalArn) + } + + if v.Value != nil { + ok := object.Key("value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutAttributesInput(v *PutAttributesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Attributes != nil { + ok := object.Key("attributes") + if err := awsAwsjson11_serializeDocumentAttributes(v.Attributes, ok); err != nil { + return err + } + } + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutClusterCapacityProvidersInput(v *PutClusterCapacityProvidersInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CapacityProviders != nil { + ok := object.Key("capacityProviders") + if err := awsAwsjson11_serializeDocumentStringList(v.CapacityProviders, ok); err != nil { + return err + } + } + + if v.Cluster != nil { + ok := object.Key("cluster") + 
ok.String(*v.Cluster) + } + + if v.DefaultCapacityProviderStrategy != nil { + ok := object.Key("defaultCapacityProviderStrategy") + if err := awsAwsjson11_serializeDocumentCapacityProviderStrategy(v.DefaultCapacityProviderStrategy, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentRegisterContainerInstanceInput(v *RegisterContainerInstanceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Attributes != nil { + ok := object.Key("attributes") + if err := awsAwsjson11_serializeDocumentAttributes(v.Attributes, ok); err != nil { + return err + } + } + + if v.ClientToken != nil { + ok := object.Key("clientToken") + ok.String(*v.ClientToken) + } + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.ContainerInstanceArn != nil { + ok := object.Key("containerInstanceArn") + ok.String(*v.ContainerInstanceArn) + } + + if v.InstanceIdentityDocument != nil { + ok := object.Key("instanceIdentityDocument") + ok.String(*v.InstanceIdentityDocument) + } + + if v.InstanceIdentityDocumentSignature != nil { + ok := object.Key("instanceIdentityDocumentSignature") + ok.String(*v.InstanceIdentityDocumentSignature) + } + + if v.PlatformDevices != nil { + ok := object.Key("platformDevices") + if err := awsAwsjson11_serializeDocumentPlatformDevices(v.PlatformDevices, ok); err != nil { + return err + } + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { + return err + } + } + + if v.TotalResources != nil { + ok := object.Key("totalResources") + if err := awsAwsjson11_serializeDocumentResources(v.TotalResources, ok); err != nil { + return err + } + } + + if v.VersionInfo != nil { + ok := object.Key("versionInfo") + if err := awsAwsjson11_serializeDocumentVersionInfo(v.VersionInfo, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentRegisterTaskDefinitionInput(v *RegisterTaskDefinitionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContainerDefinitions != nil { + ok := object.Key("containerDefinitions") + if err := awsAwsjson11_serializeDocumentContainerDefinitions(v.ContainerDefinitions, ok); err != nil { + return err + } + } + + if v.Cpu != nil { + ok := object.Key("cpu") + ok.String(*v.Cpu) + } + + if v.EphemeralStorage != nil { + ok := object.Key("ephemeralStorage") + if err := awsAwsjson11_serializeDocumentEphemeralStorage(v.EphemeralStorage, ok); err != nil { + return err + } + } + + if v.ExecutionRoleArn != nil { + ok := object.Key("executionRoleArn") + ok.String(*v.ExecutionRoleArn) + } + + if v.Family != nil { + ok := object.Key("family") + ok.String(*v.Family) + } + + if v.InferenceAccelerators != nil { + ok := object.Key("inferenceAccelerators") + if err := awsAwsjson11_serializeDocumentInferenceAccelerators(v.InferenceAccelerators, ok); err != nil { + return err + } + } + + if len(v.IpcMode) > 0 { + ok := object.Key("ipcMode") + ok.String(string(v.IpcMode)) + } + + if v.Memory != nil { + ok := object.Key("memory") + ok.String(*v.Memory) + } + + if len(v.NetworkMode) > 0 { + ok := object.Key("networkMode") + ok.String(string(v.NetworkMode)) + } + + if len(v.PidMode) > 0 { + ok := object.Key("pidMode") + ok.String(string(v.PidMode)) + } + + if v.PlacementConstraints != nil { + ok := object.Key("placementConstraints") + if err := 
awsAwsjson11_serializeDocumentTaskDefinitionPlacementConstraints(v.PlacementConstraints, ok); err != nil { + return err + } + } + + if v.ProxyConfiguration != nil { + ok := object.Key("proxyConfiguration") + if err := awsAwsjson11_serializeDocumentProxyConfiguration(v.ProxyConfiguration, ok); err != nil { + return err + } + } + + if v.RequiresCompatibilities != nil { + ok := object.Key("requiresCompatibilities") + if err := awsAwsjson11_serializeDocumentCompatibilityList(v.RequiresCompatibilities, ok); err != nil { + return err + } + } + + if v.RuntimePlatform != nil { + ok := object.Key("runtimePlatform") + if err := awsAwsjson11_serializeDocumentRuntimePlatform(v.RuntimePlatform, ok); err != nil { + return err + } + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { + return err + } + } + + if v.TaskRoleArn != nil { + ok := object.Key("taskRoleArn") + ok.String(*v.TaskRoleArn) + } + + if v.Volumes != nil { + ok := object.Key("volumes") + if err := awsAwsjson11_serializeDocumentVolumeList(v.Volumes, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentRunTaskInput(v *RunTaskInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CapacityProviderStrategy != nil { + ok := object.Key("capacityProviderStrategy") + if err := awsAwsjson11_serializeDocumentCapacityProviderStrategy(v.CapacityProviderStrategy, ok); err != nil { + return err + } + } + + if v.ClientToken != nil { + ok := object.Key("clientToken") + ok.String(*v.ClientToken) + } + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Count != nil { + ok := object.Key("count") + ok.Integer(*v.Count) + } + + if v.EnableECSManagedTags { + ok := object.Key("enableECSManagedTags") + ok.Boolean(v.EnableECSManagedTags) + } + + if v.EnableExecuteCommand { + ok := object.Key("enableExecuteCommand") + ok.Boolean(v.EnableExecuteCommand) + } + + if v.Group != nil { + ok := object.Key("group") + ok.String(*v.Group) + } + + if len(v.LaunchType) > 0 { + ok := object.Key("launchType") + ok.String(string(v.LaunchType)) + } + + if v.NetworkConfiguration != nil { + ok := object.Key("networkConfiguration") + if err := awsAwsjson11_serializeDocumentNetworkConfiguration(v.NetworkConfiguration, ok); err != nil { + return err + } + } + + if v.Overrides != nil { + ok := object.Key("overrides") + if err := awsAwsjson11_serializeDocumentTaskOverride(v.Overrides, ok); err != nil { + return err + } + } + + if v.PlacementConstraints != nil { + ok := object.Key("placementConstraints") + if err := awsAwsjson11_serializeDocumentPlacementConstraints(v.PlacementConstraints, ok); err != nil { + return err + } + } + + if v.PlacementStrategy != nil { + ok := object.Key("placementStrategy") + if err := awsAwsjson11_serializeDocumentPlacementStrategies(v.PlacementStrategy, ok); err != nil { + return err + } + } + + if v.PlatformVersion != nil { + ok := object.Key("platformVersion") + ok.String(*v.PlatformVersion) + } + + if len(v.PropagateTags) > 0 { + ok := object.Key("propagateTags") + ok.String(string(v.PropagateTags)) + } + + if v.ReferenceId != nil { + ok := object.Key("referenceId") + ok.String(*v.ReferenceId) + } + + if v.StartedBy != nil { + ok := object.Key("startedBy") + ok.String(*v.StartedBy) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { + return err + } + } + + if v.TaskDefinition != nil 
{ + ok := object.Key("taskDefinition") + ok.String(*v.TaskDefinition) + } + + if v.VolumeConfigurations != nil { + ok := object.Key("volumeConfigurations") + if err := awsAwsjson11_serializeDocumentTaskVolumeConfigurations(v.VolumeConfigurations, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentStartTaskInput(v *StartTaskInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.ContainerInstances != nil { + ok := object.Key("containerInstances") + if err := awsAwsjson11_serializeDocumentStringList(v.ContainerInstances, ok); err != nil { + return err + } + } + + if v.EnableECSManagedTags { + ok := object.Key("enableECSManagedTags") + ok.Boolean(v.EnableECSManagedTags) + } + + if v.EnableExecuteCommand { + ok := object.Key("enableExecuteCommand") + ok.Boolean(v.EnableExecuteCommand) + } + + if v.Group != nil { + ok := object.Key("group") + ok.String(*v.Group) + } + + if v.NetworkConfiguration != nil { + ok := object.Key("networkConfiguration") + if err := awsAwsjson11_serializeDocumentNetworkConfiguration(v.NetworkConfiguration, ok); err != nil { + return err + } + } + + if v.Overrides != nil { + ok := object.Key("overrides") + if err := awsAwsjson11_serializeDocumentTaskOverride(v.Overrides, ok); err != nil { + return err + } + } + + if len(v.PropagateTags) > 0 { + ok := object.Key("propagateTags") + ok.String(string(v.PropagateTags)) + } + + if v.ReferenceId != nil { + ok := object.Key("referenceId") + ok.String(*v.ReferenceId) + } + + if v.StartedBy != nil { + ok := object.Key("startedBy") + ok.String(*v.StartedBy) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { + return err + } + } + + if v.TaskDefinition != nil { + ok := object.Key("taskDefinition") + ok.String(*v.TaskDefinition) + } + + if v.VolumeConfigurations != nil { + ok := object.Key("volumeConfigurations") + if err := awsAwsjson11_serializeDocumentTaskVolumeConfigurations(v.VolumeConfigurations, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentStopTaskInput(v *StopTaskInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Reason != nil { + ok := object.Key("reason") + ok.String(*v.Reason) + } + + if v.Task != nil { + ok := object.Key("task") + ok.String(*v.Task) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentSubmitAttachmentStateChangesInput(v *SubmitAttachmentStateChangesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Attachments != nil { + ok := object.Key("attachments") + if err := awsAwsjson11_serializeDocumentAttachmentStateChanges(v.Attachments, ok); err != nil { + return err + } + } + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentSubmitContainerStateChangeInput(v *SubmitContainerStateChangeInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.ContainerName != nil { + ok := object.Key("containerName") + ok.String(*v.ContainerName) + } + + if v.ExitCode != nil { + ok := object.Key("exitCode") + ok.Integer(*v.ExitCode) + } + 
+ if v.NetworkBindings != nil { + ok := object.Key("networkBindings") + if err := awsAwsjson11_serializeDocumentNetworkBindings(v.NetworkBindings, ok); err != nil { + return err + } + } + + if v.Reason != nil { + ok := object.Key("reason") + ok.String(*v.Reason) + } + + if v.RuntimeId != nil { + ok := object.Key("runtimeId") + ok.String(*v.RuntimeId) + } + + if v.Status != nil { + ok := object.Key("status") + ok.String(*v.Status) + } + + if v.Task != nil { + ok := object.Key("task") + ok.String(*v.Task) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentSubmitTaskStateChangeInput(v *SubmitTaskStateChangeInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Attachments != nil { + ok := object.Key("attachments") + if err := awsAwsjson11_serializeDocumentAttachmentStateChanges(v.Attachments, ok); err != nil { + return err + } + } + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Containers != nil { + ok := object.Key("containers") + if err := awsAwsjson11_serializeDocumentContainerStateChanges(v.Containers, ok); err != nil { + return err + } + } + + if v.ExecutionStoppedAt != nil { + ok := object.Key("executionStoppedAt") + ok.Double(smithytime.FormatEpochSeconds(*v.ExecutionStoppedAt)) + } + + if v.ManagedAgents != nil { + ok := object.Key("managedAgents") + if err := awsAwsjson11_serializeDocumentManagedAgentStateChanges(v.ManagedAgents, ok); err != nil { + return err + } + } + + if v.PullStartedAt != nil { + ok := object.Key("pullStartedAt") + ok.Double(smithytime.FormatEpochSeconds(*v.PullStartedAt)) + } + + if v.PullStoppedAt != nil { + ok := object.Key("pullStoppedAt") + ok.Double(smithytime.FormatEpochSeconds(*v.PullStoppedAt)) + } + + if v.Reason != nil { + ok := object.Key("reason") + ok.String(*v.Reason) + } + + if v.Status != nil { + ok := object.Key("status") + ok.String(*v.Status) + } + + if v.Task != nil { + ok := object.Key("task") + ok.String(*v.Task) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("resourceArn") + ok.String(*v.ResourceArn) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("resourceArn") + ok.String(*v.ResourceArn) + } + + if v.TagKeys != nil { + ok := object.Key("tagKeys") + if err := awsAwsjson11_serializeDocumentTagKeys(v.TagKeys, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateCapacityProviderInput(v *UpdateCapacityProviderInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AutoScalingGroupProvider != nil { + ok := object.Key("autoScalingGroupProvider") + if err := awsAwsjson11_serializeDocumentAutoScalingGroupProviderUpdate(v.AutoScalingGroupProvider, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateClusterInput(v *UpdateClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster 
!= nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Configuration != nil { + ok := object.Key("configuration") + if err := awsAwsjson11_serializeDocumentClusterConfiguration(v.Configuration, ok); err != nil { + return err + } + } + + if v.ServiceConnectDefaults != nil { + ok := object.Key("serviceConnectDefaults") + if err := awsAwsjson11_serializeDocumentClusterServiceConnectDefaultsRequest(v.ServiceConnectDefaults, ok); err != nil { + return err + } + } + + if v.Settings != nil { + ok := object.Key("settings") + if err := awsAwsjson11_serializeDocumentClusterSettings(v.Settings, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateClusterSettingsInput(v *UpdateClusterSettingsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Settings != nil { + ok := object.Key("settings") + if err := awsAwsjson11_serializeDocumentClusterSettings(v.Settings, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateContainerAgentInput(v *UpdateContainerAgentInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.ContainerInstance != nil { + ok := object.Key("containerInstance") + ok.String(*v.ContainerInstance) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateContainerInstancesStateInput(v *UpdateContainerInstancesStateInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.ContainerInstances != nil { + ok := object.Key("containerInstances") + if err := awsAwsjson11_serializeDocumentStringList(v.ContainerInstances, ok); err != nil { + return err + } + } + + if len(v.Status) > 0 { + ok := object.Key("status") + ok.String(string(v.Status)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateServiceInput(v *UpdateServiceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CapacityProviderStrategy != nil { + ok := object.Key("capacityProviderStrategy") + if err := awsAwsjson11_serializeDocumentCapacityProviderStrategy(v.CapacityProviderStrategy, ok); err != nil { + return err + } + } + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.DeploymentConfiguration != nil { + ok := object.Key("deploymentConfiguration") + if err := awsAwsjson11_serializeDocumentDeploymentConfiguration(v.DeploymentConfiguration, ok); err != nil { + return err + } + } + + if v.DesiredCount != nil { + ok := object.Key("desiredCount") + ok.Integer(*v.DesiredCount) + } + + if v.EnableECSManagedTags != nil { + ok := object.Key("enableECSManagedTags") + ok.Boolean(*v.EnableECSManagedTags) + } + + if v.EnableExecuteCommand != nil { + ok := object.Key("enableExecuteCommand") + ok.Boolean(*v.EnableExecuteCommand) + } + + if v.ForceNewDeployment { + ok := object.Key("forceNewDeployment") + ok.Boolean(v.ForceNewDeployment) + } + + if v.HealthCheckGracePeriodSeconds != nil { + ok := object.Key("healthCheckGracePeriodSeconds") + ok.Integer(*v.HealthCheckGracePeriodSeconds) + } + + if v.LoadBalancers != nil { + ok := object.Key("loadBalancers") + if err := awsAwsjson11_serializeDocumentLoadBalancers(v.LoadBalancers, ok); err != nil { + 
return err + } + } + + if v.NetworkConfiguration != nil { + ok := object.Key("networkConfiguration") + if err := awsAwsjson11_serializeDocumentNetworkConfiguration(v.NetworkConfiguration, ok); err != nil { + return err + } + } + + if v.PlacementConstraints != nil { + ok := object.Key("placementConstraints") + if err := awsAwsjson11_serializeDocumentPlacementConstraints(v.PlacementConstraints, ok); err != nil { + return err + } + } + + if v.PlacementStrategy != nil { + ok := object.Key("placementStrategy") + if err := awsAwsjson11_serializeDocumentPlacementStrategies(v.PlacementStrategy, ok); err != nil { + return err + } + } + + if v.PlatformVersion != nil { + ok := object.Key("platformVersion") + ok.String(*v.PlatformVersion) + } + + if len(v.PropagateTags) > 0 { + ok := object.Key("propagateTags") + ok.String(string(v.PropagateTags)) + } + + if v.Service != nil { + ok := object.Key("service") + ok.String(*v.Service) + } + + if v.ServiceConnectConfiguration != nil { + ok := object.Key("serviceConnectConfiguration") + if err := awsAwsjson11_serializeDocumentServiceConnectConfiguration(v.ServiceConnectConfiguration, ok); err != nil { + return err + } + } + + if v.ServiceRegistries != nil { + ok := object.Key("serviceRegistries") + if err := awsAwsjson11_serializeDocumentServiceRegistries(v.ServiceRegistries, ok); err != nil { + return err + } + } + + if v.TaskDefinition != nil { + ok := object.Key("taskDefinition") + ok.String(*v.TaskDefinition) + } + + if v.VolumeConfigurations != nil { + ok := object.Key("volumeConfigurations") + if err := awsAwsjson11_serializeDocumentServiceVolumeConfigurations(v.VolumeConfigurations, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateServicePrimaryTaskSetInput(v *UpdateServicePrimaryTaskSetInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.PrimaryTaskSet != nil { + ok := object.Key("primaryTaskSet") + ok.String(*v.PrimaryTaskSet) + } + + if v.Service != nil { + ok := object.Key("service") + ok.String(*v.Service) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateTaskProtectionInput(v *UpdateTaskProtectionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.ExpiresInMinutes != nil { + ok := object.Key("expiresInMinutes") + ok.Integer(*v.ExpiresInMinutes) + } + + { + ok := object.Key("protectionEnabled") + ok.Boolean(v.ProtectionEnabled) + } + + if v.Tasks != nil { + ok := object.Key("tasks") + if err := awsAwsjson11_serializeDocumentStringList(v.Tasks, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateTaskSetInput(v *UpdateTaskSetInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Cluster != nil { + ok := object.Key("cluster") + ok.String(*v.Cluster) + } + + if v.Scale != nil { + ok := object.Key("scale") + if err := awsAwsjson11_serializeDocumentScale(v.Scale, ok); err != nil { + return err + } + } + + if v.Service != nil { + ok := object.Key("service") + ok.String(*v.Service) + } + + if v.TaskSet != nil { + ok := object.Key("taskSet") + ok.String(*v.TaskSet) + } + + return nil +} diff --git a/aws-sdk-go-v2/service/ecs/snapshot_test.go b/aws-sdk-go-v2/service/ecs/snapshot_test.go new file mode 100644 index 00000000000..f4eafb059b1 
--- /dev/null +++ b/aws-sdk-go-v2/service/ecs/snapshot_test.go @@ -0,0 +1,1406 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +//go:build snapshot +package ecs + +import ( + "context" + "errors" + "fmt" + "io/fs" + "io" + "github.com/aws/smithy-go/middleware" + "os" + "testing" +) + +const ssprefix = "snapshot" + +type snapshotOK struct{} + +func (snapshotOK) Error() string { return "error: success" } + +func createp(path string) (*os.File, error) { + if err := os.Mkdir(ssprefix, 0700); err != nil && !errors.Is(err, fs.ErrExist) { + return nil, err + } + return os.Create(path) +} + +func sspath(op string) string { + return fmt.Sprintf("%s/api_op_%s.go.snap", ssprefix, op) +} + +func updateSnapshot(stack *middleware.Stack, operation string) error { + f, err := createp(sspath(operation)) + if err != nil { + return err + } + defer f.Close() + if _, err := f.Write([]byte(stack.String())); err != nil { + return err + } + return snapshotOK{} +} + +func testSnapshot(stack *middleware.Stack, operation string) error { + f, err := os.Open(sspath(operation)) + if errors.Is(err, fs.ErrNotExist) { + return snapshotOK{} + } + if err != nil { + return err + } + defer f.Close() + expected, err := io.ReadAll(f) + if err != nil { + return err + } + if actual := stack.String(); actual != string(expected) { + return fmt.Errorf("%s != %s", expected, actual) + } + return snapshotOK{} +} +func TestCheckSnapshot_CreateCapacityProvider(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateCapacityProvider(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "CreateCapacityProvider") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_CreateCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "CreateCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_CreateService(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateService(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "CreateService") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_CreateTaskSet(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateTaskSet(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "CreateTaskSet") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DeleteAccountSetting(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteAccountSetting(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeleteAccountSetting") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DeleteAttributes(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteAttributes(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeleteAttributes") + }) + }) 
+ if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DeleteCapacityProvider(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteCapacityProvider(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeleteCapacityProvider") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DeleteCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeleteCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DeleteService(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteService(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeleteService") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DeleteTaskDefinitions(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteTaskDefinitions(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeleteTaskDefinitions") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DeleteTaskSet(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteTaskSet(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeleteTaskSet") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DeregisterContainerInstance(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeregisterContainerInstance(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeregisterContainerInstance") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DeregisterTaskDefinition(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeregisterTaskDefinition(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeregisterTaskDefinition") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DescribeCapacityProviders(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeCapacityProviders(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DescribeCapacityProviders") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DescribeClusters(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeClusters(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DescribeClusters") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func 
TestCheckSnapshot_DescribeContainerInstances(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeContainerInstances(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DescribeContainerInstances") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DescribeServices(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeServices(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DescribeServices") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DescribeTaskDefinition(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeTaskDefinition(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DescribeTaskDefinition") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DescribeTasks(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeTasks(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DescribeTasks") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DescribeTaskSets(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeTaskSets(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DescribeTaskSets") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DiscoverPollEndpoint(t *testing.T) { + svc := New(Options{}) + _, err := svc.DiscoverPollEndpoint(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DiscoverPollEndpoint") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ExecuteCommand(t *testing.T) { + svc := New(Options{}) + _, err := svc.ExecuteCommand(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ExecuteCommand") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_GetTaskProtection(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetTaskProtection(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "GetTaskProtection") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListAccountSettings(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListAccountSettings(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListAccountSettings") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListAttributes(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListAttributes(context.Background(), nil, func(o *Options) { + 
o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListAttributes") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListClusters(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListClusters(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListClusters") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListContainerInstances(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListContainerInstances(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListContainerInstances") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListServices(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListServices(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListServices") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListServicesByNamespace(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListServicesByNamespace(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListServicesByNamespace") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListTagsForResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTagsForResource(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListTagsForResource") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListTaskDefinitionFamilies(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTaskDefinitionFamilies(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListTaskDefinitionFamilies") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListTaskDefinitions(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTaskDefinitions(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListTaskDefinitions") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListTasks(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTasks(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListTasks") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_PutAccountSetting(t *testing.T) { + svc := New(Options{}) + _, err := svc.PutAccountSetting(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "PutAccountSetting") + }) + }) + if _, ok := err.(snapshotOK); !ok && 
err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_PutAccountSettingDefault(t *testing.T) { + svc := New(Options{}) + _, err := svc.PutAccountSettingDefault(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "PutAccountSettingDefault") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_PutAttributes(t *testing.T) { + svc := New(Options{}) + _, err := svc.PutAttributes(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "PutAttributes") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_PutClusterCapacityProviders(t *testing.T) { + svc := New(Options{}) + _, err := svc.PutClusterCapacityProviders(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "PutClusterCapacityProviders") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_RegisterContainerInstance(t *testing.T) { + svc := New(Options{}) + _, err := svc.RegisterContainerInstance(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "RegisterContainerInstance") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_RegisterTaskDefinition(t *testing.T) { + svc := New(Options{}) + _, err := svc.RegisterTaskDefinition(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "RegisterTaskDefinition") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_RunTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.RunTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "RunTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_StartTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.StartTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "StartTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_StopTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.StopTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "StopTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_SubmitAttachmentStateChanges(t *testing.T) { + svc := New(Options{}) + _, err := svc.SubmitAttachmentStateChanges(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "SubmitAttachmentStateChanges") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_SubmitContainerStateChange(t *testing.T) { + svc := New(Options{}) + _, err := 
svc.SubmitContainerStateChange(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "SubmitContainerStateChange") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_SubmitTaskStateChange(t *testing.T) { + svc := New(Options{}) + _, err := svc.SubmitTaskStateChange(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "SubmitTaskStateChange") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_TagResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.TagResource(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "TagResource") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UntagResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.UntagResource(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UntagResource") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UpdateCapacityProvider(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateCapacityProvider(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UpdateCapacityProvider") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UpdateCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UpdateCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UpdateClusterSettings(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateClusterSettings(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UpdateClusterSettings") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UpdateContainerAgent(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateContainerAgent(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UpdateContainerAgent") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UpdateContainerInstancesState(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateContainerInstancesState(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UpdateContainerInstancesState") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UpdateService(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateService(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) 
error { + return testSnapshot(stack, "UpdateService") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UpdateServicePrimaryTaskSet(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateServicePrimaryTaskSet(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UpdateServicePrimaryTaskSet") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UpdateTaskProtection(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateTaskProtection(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UpdateTaskProtection") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UpdateTaskSet(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateTaskSet(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UpdateTaskSet") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} +func TestUpdateSnapshot_CreateCapacityProvider(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateCapacityProvider(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "CreateCapacityProvider") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_CreateCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "CreateCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_CreateService(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateService(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "CreateService") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_CreateTaskSet(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateTaskSet(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "CreateTaskSet") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DeleteAccountSetting(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteAccountSetting(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeleteAccountSetting") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DeleteAttributes(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteAttributes(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeleteAttributes") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func 
TestUpdateSnapshot_DeleteCapacityProvider(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteCapacityProvider(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeleteCapacityProvider") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DeleteCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeleteCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DeleteService(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteService(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeleteService") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DeleteTaskDefinitions(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteTaskDefinitions(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeleteTaskDefinitions") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DeleteTaskSet(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteTaskSet(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeleteTaskSet") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DeregisterContainerInstance(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeregisterContainerInstance(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeregisterContainerInstance") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DeregisterTaskDefinition(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeregisterTaskDefinition(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeregisterTaskDefinition") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DescribeCapacityProviders(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeCapacityProviders(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DescribeCapacityProviders") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DescribeClusters(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeClusters(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DescribeClusters") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DescribeContainerInstances(t *testing.T) { + svc := New(Options{}) + _, err := 
svc.DescribeContainerInstances(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DescribeContainerInstances") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DescribeServices(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeServices(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DescribeServices") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DescribeTaskDefinition(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeTaskDefinition(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DescribeTaskDefinition") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DescribeTasks(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeTasks(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DescribeTasks") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DescribeTaskSets(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeTaskSets(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DescribeTaskSets") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DiscoverPollEndpoint(t *testing.T) { + svc := New(Options{}) + _, err := svc.DiscoverPollEndpoint(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DiscoverPollEndpoint") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ExecuteCommand(t *testing.T) { + svc := New(Options{}) + _, err := svc.ExecuteCommand(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ExecuteCommand") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_GetTaskProtection(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetTaskProtection(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "GetTaskProtection") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListAccountSettings(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListAccountSettings(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListAccountSettings") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListAttributes(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListAttributes(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + 
return updateSnapshot(stack, "ListAttributes") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListClusters(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListClusters(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListClusters") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListContainerInstances(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListContainerInstances(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListContainerInstances") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListServices(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListServices(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListServices") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListServicesByNamespace(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListServicesByNamespace(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListServicesByNamespace") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListTagsForResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTagsForResource(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListTagsForResource") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListTaskDefinitionFamilies(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTaskDefinitionFamilies(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListTaskDefinitionFamilies") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListTaskDefinitions(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTaskDefinitions(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListTaskDefinitions") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListTasks(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTasks(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListTasks") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_PutAccountSetting(t *testing.T) { + svc := New(Options{}) + _, err := svc.PutAccountSetting(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "PutAccountSetting") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func 
TestUpdateSnapshot_PutAccountSettingDefault(t *testing.T) { + svc := New(Options{}) + _, err := svc.PutAccountSettingDefault(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "PutAccountSettingDefault") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_PutAttributes(t *testing.T) { + svc := New(Options{}) + _, err := svc.PutAttributes(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "PutAttributes") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_PutClusterCapacityProviders(t *testing.T) { + svc := New(Options{}) + _, err := svc.PutClusterCapacityProviders(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "PutClusterCapacityProviders") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_RegisterContainerInstance(t *testing.T) { + svc := New(Options{}) + _, err := svc.RegisterContainerInstance(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "RegisterContainerInstance") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_RegisterTaskDefinition(t *testing.T) { + svc := New(Options{}) + _, err := svc.RegisterTaskDefinition(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "RegisterTaskDefinition") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_RunTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.RunTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "RunTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_StartTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.StartTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "StartTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_StopTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.StopTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "StopTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_SubmitAttachmentStateChanges(t *testing.T) { + svc := New(Options{}) + _, err := svc.SubmitAttachmentStateChanges(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "SubmitAttachmentStateChanges") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_SubmitContainerStateChange(t *testing.T) { + svc := New(Options{}) + _, err := 
svc.SubmitContainerStateChange(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "SubmitContainerStateChange") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_SubmitTaskStateChange(t *testing.T) { + svc := New(Options{}) + _, err := svc.SubmitTaskStateChange(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "SubmitTaskStateChange") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_TagResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.TagResource(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "TagResource") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UntagResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.UntagResource(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UntagResource") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UpdateCapacityProvider(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateCapacityProvider(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateCapacityProvider") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UpdateCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UpdateClusterSettings(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateClusterSettings(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateClusterSettings") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UpdateContainerAgent(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateContainerAgent(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateContainerAgent") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UpdateContainerInstancesState(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateContainerInstancesState(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateContainerInstancesState") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UpdateService(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateService(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, 
func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateService") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UpdateServicePrimaryTaskSet(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateServicePrimaryTaskSet(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateServicePrimaryTaskSet") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UpdateTaskProtection(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateTaskProtection(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateTaskProtection") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UpdateTaskSet(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateTaskSet(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateTaskSet") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} diff --git a/aws-sdk-go-v2/service/ecs/types/enums.go b/aws-sdk-go-v2/service/ecs/types/enums.go new file mode 100644 index 00000000000..95102df8ccb --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/types/enums.go @@ -0,0 +1,1297 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package types + +type AgentUpdateStatus string + +// Enum values for AgentUpdateStatus +const ( + AgentUpdateStatusPending AgentUpdateStatus = "PENDING" + AgentUpdateStatusStaging AgentUpdateStatus = "STAGING" + AgentUpdateStatusStaged AgentUpdateStatus = "STAGED" + AgentUpdateStatusUpdating AgentUpdateStatus = "UPDATING" + AgentUpdateStatusUpdated AgentUpdateStatus = "UPDATED" + AgentUpdateStatusFailed AgentUpdateStatus = "FAILED" +) + +// Values returns all known values for AgentUpdateStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (AgentUpdateStatus) Values() []AgentUpdateStatus { + return []AgentUpdateStatus{ + "PENDING", + "STAGING", + "STAGED", + "UPDATING", + "UPDATED", + "FAILED", + } +} + +type ApplicationProtocol string + +// Enum values for ApplicationProtocol +const ( + ApplicationProtocolHttp ApplicationProtocol = "http" + ApplicationProtocolHttp2 ApplicationProtocol = "http2" + ApplicationProtocolGrpc ApplicationProtocol = "grpc" +) + +// Values returns all known values for ApplicationProtocol. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ApplicationProtocol) Values() []ApplicationProtocol { + return []ApplicationProtocol{ + "http", + "http2", + "grpc", + } +} + +type AssignPublicIp string + +// Enum values for AssignPublicIp +const ( + AssignPublicIpEnabled AssignPublicIp = "ENABLED" + AssignPublicIpDisabled AssignPublicIp = "DISABLED" +) + +// Values returns all known values for AssignPublicIp. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (AssignPublicIp) Values() []AssignPublicIp { + return []AssignPublicIp{ + "ENABLED", + "DISABLED", + } +} + +type CapacityProviderField string + +// Enum values for CapacityProviderField +const ( + CapacityProviderFieldTags CapacityProviderField = "TAGS" +) + +// Values returns all known values for CapacityProviderField. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (CapacityProviderField) Values() []CapacityProviderField { + return []CapacityProviderField{ + "TAGS", + } +} + +type CapacityProviderStatus string + +// Enum values for CapacityProviderStatus +const ( + CapacityProviderStatusActive CapacityProviderStatus = "ACTIVE" + CapacityProviderStatusInactive CapacityProviderStatus = "INACTIVE" +) + +// Values returns all known values for CapacityProviderStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (CapacityProviderStatus) Values() []CapacityProviderStatus { + return []CapacityProviderStatus{ + "ACTIVE", + "INACTIVE", + } +} + +type CapacityProviderUpdateStatus string + +// Enum values for CapacityProviderUpdateStatus +const ( + CapacityProviderUpdateStatusDeleteInProgress CapacityProviderUpdateStatus = "DELETE_IN_PROGRESS" + CapacityProviderUpdateStatusDeleteComplete CapacityProviderUpdateStatus = "DELETE_COMPLETE" + CapacityProviderUpdateStatusDeleteFailed CapacityProviderUpdateStatus = "DELETE_FAILED" + CapacityProviderUpdateStatusUpdateInProgress CapacityProviderUpdateStatus = "UPDATE_IN_PROGRESS" + CapacityProviderUpdateStatusUpdateComplete CapacityProviderUpdateStatus = "UPDATE_COMPLETE" + CapacityProviderUpdateStatusUpdateFailed CapacityProviderUpdateStatus = "UPDATE_FAILED" +) + +// Values returns all known values for CapacityProviderUpdateStatus. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (CapacityProviderUpdateStatus) Values() []CapacityProviderUpdateStatus { + return []CapacityProviderUpdateStatus{ + "DELETE_IN_PROGRESS", + "DELETE_COMPLETE", + "DELETE_FAILED", + "UPDATE_IN_PROGRESS", + "UPDATE_COMPLETE", + "UPDATE_FAILED", + } +} + +type ClusterField string + +// Enum values for ClusterField +const ( + ClusterFieldAttachments ClusterField = "ATTACHMENTS" + ClusterFieldConfigurations ClusterField = "CONFIGURATIONS" + ClusterFieldSettings ClusterField = "SETTINGS" + ClusterFieldStatistics ClusterField = "STATISTICS" + ClusterFieldTags ClusterField = "TAGS" +) + +// Values returns all known values for ClusterField. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ClusterField) Values() []ClusterField { + return []ClusterField{ + "ATTACHMENTS", + "CONFIGURATIONS", + "SETTINGS", + "STATISTICS", + "TAGS", + } +} + +type ClusterSettingName string + +// Enum values for ClusterSettingName +const ( + ClusterSettingNameContainerInsights ClusterSettingName = "containerInsights" +) + +// Values returns all known values for ClusterSettingName. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (ClusterSettingName) Values() []ClusterSettingName { + return []ClusterSettingName{ + "containerInsights", + } +} + +type Compatibility string + +// Enum values for Compatibility +const ( + CompatibilityEc2 Compatibility = "EC2" + CompatibilityFargate Compatibility = "FARGATE" + CompatibilityExternal Compatibility = "EXTERNAL" +) + +// Values returns all known values for Compatibility. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (Compatibility) Values() []Compatibility { + return []Compatibility{ + "EC2", + "FARGATE", + "EXTERNAL", + } +} + +type Connectivity string + +// Enum values for Connectivity +const ( + ConnectivityConnected Connectivity = "CONNECTED" + ConnectivityDisconnected Connectivity = "DISCONNECTED" +) + +// Values returns all known values for Connectivity. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (Connectivity) Values() []Connectivity { + return []Connectivity{ + "CONNECTED", + "DISCONNECTED", + } +} + +type ContainerCondition string + +// Enum values for ContainerCondition +const ( + ContainerConditionStart ContainerCondition = "START" + ContainerConditionComplete ContainerCondition = "COMPLETE" + ContainerConditionSuccess ContainerCondition = "SUCCESS" + ContainerConditionHealthy ContainerCondition = "HEALTHY" +) + +// Values returns all known values for ContainerCondition. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ContainerCondition) Values() []ContainerCondition { + return []ContainerCondition{ + "START", + "COMPLETE", + "SUCCESS", + "HEALTHY", + } +} + +type ContainerInstanceField string + +// Enum values for ContainerInstanceField +const ( + ContainerInstanceFieldTags ContainerInstanceField = "TAGS" + ContainerInstanceFieldContainerInstanceHealth ContainerInstanceField = "CONTAINER_INSTANCE_HEALTH" +) + +// Values returns all known values for ContainerInstanceField. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ContainerInstanceField) Values() []ContainerInstanceField { + return []ContainerInstanceField{ + "TAGS", + "CONTAINER_INSTANCE_HEALTH", + } +} + +type ContainerInstanceStatus string + +// Enum values for ContainerInstanceStatus +const ( + ContainerInstanceStatusActive ContainerInstanceStatus = "ACTIVE" + ContainerInstanceStatusDraining ContainerInstanceStatus = "DRAINING" + ContainerInstanceStatusRegistering ContainerInstanceStatus = "REGISTERING" + ContainerInstanceStatusDeregistering ContainerInstanceStatus = "DEREGISTERING" + ContainerInstanceStatusRegistrationFailed ContainerInstanceStatus = "REGISTRATION_FAILED" +) + +// Values returns all known values for ContainerInstanceStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (ContainerInstanceStatus) Values() []ContainerInstanceStatus { + return []ContainerInstanceStatus{ + "ACTIVE", + "DRAINING", + "REGISTERING", + "DEREGISTERING", + "REGISTRATION_FAILED", + } +} + +type CPUArchitecture string + +// Enum values for CPUArchitecture +const ( + CPUArchitectureX8664 CPUArchitecture = "X86_64" + CPUArchitectureArm64 CPUArchitecture = "ARM64" +) + +// Values returns all known values for CPUArchitecture. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (CPUArchitecture) Values() []CPUArchitecture { + return []CPUArchitecture{ + "X86_64", + "ARM64", + } +} + +type DeploymentControllerType string + +// Enum values for DeploymentControllerType +const ( + DeploymentControllerTypeEcs DeploymentControllerType = "ECS" + DeploymentControllerTypeCodeDeploy DeploymentControllerType = "CODE_DEPLOY" + DeploymentControllerTypeExternal DeploymentControllerType = "EXTERNAL" +) + +// Values returns all known values for DeploymentControllerType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DeploymentControllerType) Values() []DeploymentControllerType { + return []DeploymentControllerType{ + "ECS", + "CODE_DEPLOY", + "EXTERNAL", + } +} + +type DeploymentRolloutState string + +// Enum values for DeploymentRolloutState +const ( + DeploymentRolloutStateCompleted DeploymentRolloutState = "COMPLETED" + DeploymentRolloutStateFailed DeploymentRolloutState = "FAILED" + DeploymentRolloutStateInProgress DeploymentRolloutState = "IN_PROGRESS" +) + +// Values returns all known values for DeploymentRolloutState. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DeploymentRolloutState) Values() []DeploymentRolloutState { + return []DeploymentRolloutState{ + "COMPLETED", + "FAILED", + "IN_PROGRESS", + } +} + +type DesiredStatus string + +// Enum values for DesiredStatus +const ( + DesiredStatusRunning DesiredStatus = "RUNNING" + DesiredStatusPending DesiredStatus = "PENDING" + DesiredStatusStopped DesiredStatus = "STOPPED" +) + +// Values returns all known values for DesiredStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DesiredStatus) Values() []DesiredStatus { + return []DesiredStatus{ + "RUNNING", + "PENDING", + "STOPPED", + } +} + +type DeviceCgroupPermission string + +// Enum values for DeviceCgroupPermission +const ( + DeviceCgroupPermissionRead DeviceCgroupPermission = "read" + DeviceCgroupPermissionWrite DeviceCgroupPermission = "write" + DeviceCgroupPermissionMknod DeviceCgroupPermission = "mknod" +) + +// Values returns all known values for DeviceCgroupPermission. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (DeviceCgroupPermission) Values() []DeviceCgroupPermission { + return []DeviceCgroupPermission{ + "read", + "write", + "mknod", + } +} + +type EBSResourceType string + +// Enum values for EBSResourceType +const ( + EBSResourceTypeVolume EBSResourceType = "volume" +) + +// Values returns all known values for EBSResourceType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (EBSResourceType) Values() []EBSResourceType { + return []EBSResourceType{ + "volume", + } +} + +type EFSAuthorizationConfigIAM string + +// Enum values for EFSAuthorizationConfigIAM +const ( + EFSAuthorizationConfigIAMEnabled EFSAuthorizationConfigIAM = "ENABLED" + EFSAuthorizationConfigIAMDisabled EFSAuthorizationConfigIAM = "DISABLED" +) + +// Values returns all known values for EFSAuthorizationConfigIAM. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (EFSAuthorizationConfigIAM) Values() []EFSAuthorizationConfigIAM { + return []EFSAuthorizationConfigIAM{ + "ENABLED", + "DISABLED", + } +} + +type EFSTransitEncryption string + +// Enum values for EFSTransitEncryption +const ( + EFSTransitEncryptionEnabled EFSTransitEncryption = "ENABLED" + EFSTransitEncryptionDisabled EFSTransitEncryption = "DISABLED" +) + +// Values returns all known values for EFSTransitEncryption. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (EFSTransitEncryption) Values() []EFSTransitEncryption { + return []EFSTransitEncryption{ + "ENABLED", + "DISABLED", + } +} + +type EnvironmentFileType string + +// Enum values for EnvironmentFileType +const ( + EnvironmentFileTypeS3 EnvironmentFileType = "s3" +) + +// Values returns all known values for EnvironmentFileType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (EnvironmentFileType) Values() []EnvironmentFileType { + return []EnvironmentFileType{ + "s3", + } +} + +type ExecuteCommandLogging string + +// Enum values for ExecuteCommandLogging +const ( + ExecuteCommandLoggingNone ExecuteCommandLogging = "NONE" + ExecuteCommandLoggingDefault ExecuteCommandLogging = "DEFAULT" + ExecuteCommandLoggingOverride ExecuteCommandLogging = "OVERRIDE" +) + +// Values returns all known values for ExecuteCommandLogging. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ExecuteCommandLogging) Values() []ExecuteCommandLogging { + return []ExecuteCommandLogging{ + "NONE", + "DEFAULT", + "OVERRIDE", + } +} + +type FirelensConfigurationType string + +// Enum values for FirelensConfigurationType +const ( + FirelensConfigurationTypeFluentd FirelensConfigurationType = "fluentd" + FirelensConfigurationTypeFluentbit FirelensConfigurationType = "fluentbit" +) + +// Values returns all known values for FirelensConfigurationType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (FirelensConfigurationType) Values() []FirelensConfigurationType { + return []FirelensConfigurationType{ + "fluentd", + "fluentbit", + } +} + +type HealthStatus string + +// Enum values for HealthStatus +const ( + HealthStatusHealthy HealthStatus = "HEALTHY" + HealthStatusUnhealthy HealthStatus = "UNHEALTHY" + HealthStatusUnknown HealthStatus = "UNKNOWN" +) + +// Values returns all known values for HealthStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (HealthStatus) Values() []HealthStatus { + return []HealthStatus{ + "HEALTHY", + "UNHEALTHY", + "UNKNOWN", + } +} + +type InstanceHealthCheckState string + +// Enum values for InstanceHealthCheckState +const ( + InstanceHealthCheckStateOk InstanceHealthCheckState = "OK" + InstanceHealthCheckStateImpaired InstanceHealthCheckState = "IMPAIRED" + InstanceHealthCheckStateInsufficientData InstanceHealthCheckState = "INSUFFICIENT_DATA" + InstanceHealthCheckStateInitializing InstanceHealthCheckState = "INITIALIZING" +) + +// Values returns all known values for InstanceHealthCheckState. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (InstanceHealthCheckState) Values() []InstanceHealthCheckState { + return []InstanceHealthCheckState{ + "OK", + "IMPAIRED", + "INSUFFICIENT_DATA", + "INITIALIZING", + } +} + +type InstanceHealthCheckType string + +// Enum values for InstanceHealthCheckType +const ( + InstanceHealthCheckTypeContainerRuntime InstanceHealthCheckType = "CONTAINER_RUNTIME" +) + +// Values returns all known values for InstanceHealthCheckType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (InstanceHealthCheckType) Values() []InstanceHealthCheckType { + return []InstanceHealthCheckType{ + "CONTAINER_RUNTIME", + } +} + +type IpcMode string + +// Enum values for IpcMode +const ( + IpcModeHost IpcMode = "host" + IpcModeTask IpcMode = "task" + IpcModeNone IpcMode = "none" +) + +// Values returns all known values for IpcMode. Note that this can be expanded in +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (IpcMode) Values() []IpcMode { + return []IpcMode{ + "host", + "task", + "none", + } +} + +type LaunchType string + +// Enum values for LaunchType +const ( + LaunchTypeEc2 LaunchType = "EC2" + LaunchTypeFargate LaunchType = "FARGATE" + LaunchTypeExternal LaunchType = "EXTERNAL" +) + +// Values returns all known values for LaunchType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (LaunchType) Values() []LaunchType { + return []LaunchType{ + "EC2", + "FARGATE", + "EXTERNAL", + } +} + +type LogDriver string + +// Enum values for LogDriver +const ( + LogDriverJsonFile LogDriver = "json-file" + LogDriverSyslog LogDriver = "syslog" + LogDriverJournald LogDriver = "journald" + LogDriverGelf LogDriver = "gelf" + LogDriverFluentd LogDriver = "fluentd" + LogDriverAwslogs LogDriver = "awslogs" + LogDriverSplunk LogDriver = "splunk" + LogDriverAwsfirelens LogDriver = "awsfirelens" +) + +// Values returns all known values for LogDriver. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (LogDriver) Values() []LogDriver { + return []LogDriver{ + "json-file", + "syslog", + "journald", + "gelf", + "fluentd", + "awslogs", + "splunk", + "awsfirelens", + } +} + +type ManagedAgentName string + +// Enum values for ManagedAgentName +const ( + ManagedAgentNameExecuteCommandAgent ManagedAgentName = "ExecuteCommandAgent" +) + +// Values returns all known values for ManagedAgentName. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ManagedAgentName) Values() []ManagedAgentName { + return []ManagedAgentName{ + "ExecuteCommandAgent", + } +} + +type ManagedDraining string + +// Enum values for ManagedDraining +const ( + ManagedDrainingEnabled ManagedDraining = "ENABLED" + ManagedDrainingDisabled ManagedDraining = "DISABLED" +) + +// Values returns all known values for ManagedDraining. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ManagedDraining) Values() []ManagedDraining { + return []ManagedDraining{ + "ENABLED", + "DISABLED", + } +} + +type ManagedScalingStatus string + +// Enum values for ManagedScalingStatus +const ( + ManagedScalingStatusEnabled ManagedScalingStatus = "ENABLED" + ManagedScalingStatusDisabled ManagedScalingStatus = "DISABLED" +) + +// Values returns all known values for ManagedScalingStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ManagedScalingStatus) Values() []ManagedScalingStatus { + return []ManagedScalingStatus{ + "ENABLED", + "DISABLED", + } +} + +type ManagedTerminationProtection string + +// Enum values for ManagedTerminationProtection +const ( + ManagedTerminationProtectionEnabled ManagedTerminationProtection = "ENABLED" + ManagedTerminationProtectionDisabled ManagedTerminationProtection = "DISABLED" +) + +// Values returns all known values for ManagedTerminationProtection. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ManagedTerminationProtection) Values() []ManagedTerminationProtection { + return []ManagedTerminationProtection{ + "ENABLED", + "DISABLED", + } +} + +type NetworkMode string + +// Enum values for NetworkMode +const ( + NetworkModeBridge NetworkMode = "bridge" + NetworkModeHost NetworkMode = "host" + NetworkModeAwsvpc NetworkMode = "awsvpc" + NetworkModeNone NetworkMode = "none" +) + +// Values returns all known values for NetworkMode. 
Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (NetworkMode) Values() []NetworkMode { + return []NetworkMode{ + "bridge", + "host", + "awsvpc", + "none", + } +} + +type OSFamily string + +// Enum values for OSFamily +const ( + OSFamilyWindowsServer2019Full OSFamily = "WINDOWS_SERVER_2019_FULL" + OSFamilyWindowsServer2019Core OSFamily = "WINDOWS_SERVER_2019_CORE" + OSFamilyWindowsServer2016Full OSFamily = "WINDOWS_SERVER_2016_FULL" + OSFamilyWindowsServer2004Core OSFamily = "WINDOWS_SERVER_2004_CORE" + OSFamilyWindowsServer2022Core OSFamily = "WINDOWS_SERVER_2022_CORE" + OSFamilyWindowsServer2022Full OSFamily = "WINDOWS_SERVER_2022_FULL" + OSFamilyWindowsServer20h2Core OSFamily = "WINDOWS_SERVER_20H2_CORE" + OSFamilyLinux OSFamily = "LINUX" +) + +// Values returns all known values for OSFamily. Note that this can be expanded in +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (OSFamily) Values() []OSFamily { + return []OSFamily{ + "WINDOWS_SERVER_2019_FULL", + "WINDOWS_SERVER_2019_CORE", + "WINDOWS_SERVER_2016_FULL", + "WINDOWS_SERVER_2004_CORE", + "WINDOWS_SERVER_2022_CORE", + "WINDOWS_SERVER_2022_FULL", + "WINDOWS_SERVER_20H2_CORE", + "LINUX", + } +} + +type PidMode string + +// Enum values for PidMode +const ( + PidModeHost PidMode = "host" + PidModeTask PidMode = "task" +) + +// Values returns all known values for PidMode. Note that this can be expanded in +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (PidMode) Values() []PidMode { + return []PidMode{ + "host", + "task", + } +} + +type PlacementConstraintType string + +// Enum values for PlacementConstraintType +const ( + PlacementConstraintTypeDistinctInstance PlacementConstraintType = "distinctInstance" + PlacementConstraintTypeMemberOf PlacementConstraintType = "memberOf" +) + +// Values returns all known values for PlacementConstraintType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (PlacementConstraintType) Values() []PlacementConstraintType { + return []PlacementConstraintType{ + "distinctInstance", + "memberOf", + } +} + +type PlacementStrategyType string + +// Enum values for PlacementStrategyType +const ( + PlacementStrategyTypeRandom PlacementStrategyType = "random" + PlacementStrategyTypeSpread PlacementStrategyType = "spread" + PlacementStrategyTypeBinpack PlacementStrategyType = "binpack" +) + +// Values returns all known values for PlacementStrategyType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (PlacementStrategyType) Values() []PlacementStrategyType { + return []PlacementStrategyType{ + "random", + "spread", + "binpack", + } +} + +type PlatformDeviceType string + +// Enum values for PlatformDeviceType +const ( + PlatformDeviceTypeGpu PlatformDeviceType = "GPU" +) + +// Values returns all known values for PlatformDeviceType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (PlatformDeviceType) Values() []PlatformDeviceType { + return []PlatformDeviceType{ + "GPU", + } +} + +type PropagateTags string + +// Enum values for PropagateTags +const ( + PropagateTagsTaskDefinition PropagateTags = "TASK_DEFINITION" + PropagateTagsService PropagateTags = "SERVICE" + PropagateTagsNone PropagateTags = "NONE" +) + +// Values returns all known values for PropagateTags. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (PropagateTags) Values() []PropagateTags { + return []PropagateTags{ + "TASK_DEFINITION", + "SERVICE", + "NONE", + } +} + +type ProxyConfigurationType string + +// Enum values for ProxyConfigurationType +const ( + ProxyConfigurationTypeAppmesh ProxyConfigurationType = "APPMESH" +) + +// Values returns all known values for ProxyConfigurationType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ProxyConfigurationType) Values() []ProxyConfigurationType { + return []ProxyConfigurationType{ + "APPMESH", + } +} + +type ResourceType string + +// Enum values for ResourceType +const ( + ResourceTypeGpu ResourceType = "GPU" + ResourceTypeInferenceAccelerator ResourceType = "InferenceAccelerator" +) + +// Values returns all known values for ResourceType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ResourceType) Values() []ResourceType { + return []ResourceType{ + "GPU", + "InferenceAccelerator", + } +} + +type ScaleUnit string + +// Enum values for ScaleUnit +const ( + ScaleUnitPercent ScaleUnit = "PERCENT" +) + +// Values returns all known values for ScaleUnit. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ScaleUnit) Values() []ScaleUnit { + return []ScaleUnit{ + "PERCENT", + } +} + +type SchedulingStrategy string + +// Enum values for SchedulingStrategy +const ( + SchedulingStrategyReplica SchedulingStrategy = "REPLICA" + SchedulingStrategyDaemon SchedulingStrategy = "DAEMON" +) + +// Values returns all known values for SchedulingStrategy. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (SchedulingStrategy) Values() []SchedulingStrategy { + return []SchedulingStrategy{ + "REPLICA", + "DAEMON", + } +} + +type Scope string + +// Enum values for Scope +const ( + ScopeTask Scope = "task" + ScopeShared Scope = "shared" +) + +// Values returns all known values for Scope. Note that this can be expanded in +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (Scope) Values() []Scope { + return []Scope{ + "task", + "shared", + } +} + +type ServiceField string + +// Enum values for ServiceField +const ( + ServiceFieldTags ServiceField = "TAGS" +) + +// Values returns all known values for ServiceField. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (ServiceField) Values() []ServiceField { + return []ServiceField{ + "TAGS", + } +} + +type SettingName string + +// Enum values for SettingName +const ( + SettingNameServiceLongArnFormat SettingName = "serviceLongArnFormat" + SettingNameTaskLongArnFormat SettingName = "taskLongArnFormat" + SettingNameContainerInstanceLongArnFormat SettingName = "containerInstanceLongArnFormat" + SettingNameAwsvpcTrunking SettingName = "awsvpcTrunking" + SettingNameContainerInsights SettingName = "containerInsights" + SettingNameFargateFipsMode SettingName = "fargateFIPSMode" + SettingNameTagResourceAuthorization SettingName = "tagResourceAuthorization" + SettingNameFargateTaskRetirementWaitPeriod SettingName = "fargateTaskRetirementWaitPeriod" + SettingNameGuardDutyActivate SettingName = "guardDutyActivate" +) + +// Values returns all known values for SettingName. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (SettingName) Values() []SettingName { + return []SettingName{ + "serviceLongArnFormat", + "taskLongArnFormat", + "containerInstanceLongArnFormat", + "awsvpcTrunking", + "containerInsights", + "fargateFIPSMode", + "tagResourceAuthorization", + "fargateTaskRetirementWaitPeriod", + "guardDutyActivate", + } +} + +type SettingType string + +// Enum values for SettingType +const ( + SettingTypeUser SettingType = "user" + SettingTypeAwsManaged SettingType = "aws_managed" +) + +// Values returns all known values for SettingType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (SettingType) Values() []SettingType { + return []SettingType{ + "user", + "aws_managed", + } +} + +type SortOrder string + +// Enum values for SortOrder +const ( + SortOrderAsc SortOrder = "ASC" + SortOrderDesc SortOrder = "DESC" +) + +// Values returns all known values for SortOrder. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (SortOrder) Values() []SortOrder { + return []SortOrder{ + "ASC", + "DESC", + } +} + +type StabilityStatus string + +// Enum values for StabilityStatus +const ( + StabilityStatusSteadyState StabilityStatus = "STEADY_STATE" + StabilityStatusStabilizing StabilityStatus = "STABILIZING" +) + +// Values returns all known values for StabilityStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (StabilityStatus) Values() []StabilityStatus { + return []StabilityStatus{ + "STEADY_STATE", + "STABILIZING", + } +} + +type TargetType string + +// Enum values for TargetType +const ( + TargetTypeContainerInstance TargetType = "container-instance" +) + +// Values returns all known values for TargetType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (TargetType) Values() []TargetType { + return []TargetType{ + "container-instance", + } +} + +type TaskDefinitionFamilyStatus string + +// Enum values for TaskDefinitionFamilyStatus +const ( + TaskDefinitionFamilyStatusActive TaskDefinitionFamilyStatus = "ACTIVE" + TaskDefinitionFamilyStatusInactive TaskDefinitionFamilyStatus = "INACTIVE" + TaskDefinitionFamilyStatusAll TaskDefinitionFamilyStatus = "ALL" +) + +// Values returns all known values for TaskDefinitionFamilyStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TaskDefinitionFamilyStatus) Values() []TaskDefinitionFamilyStatus { + return []TaskDefinitionFamilyStatus{ + "ACTIVE", + "INACTIVE", + "ALL", + } +} + +type TaskDefinitionField string + +// Enum values for TaskDefinitionField +const ( + TaskDefinitionFieldTags TaskDefinitionField = "TAGS" +) + +// Values returns all known values for TaskDefinitionField. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TaskDefinitionField) Values() []TaskDefinitionField { + return []TaskDefinitionField{ + "TAGS", + } +} + +type TaskDefinitionPlacementConstraintType string + +// Enum values for TaskDefinitionPlacementConstraintType +const ( + TaskDefinitionPlacementConstraintTypeMemberOf TaskDefinitionPlacementConstraintType = "memberOf" +) + +// Values returns all known values for TaskDefinitionPlacementConstraintType. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TaskDefinitionPlacementConstraintType) Values() []TaskDefinitionPlacementConstraintType { + return []TaskDefinitionPlacementConstraintType{ + "memberOf", + } +} + +type TaskDefinitionStatus string + +// Enum values for TaskDefinitionStatus +const ( + TaskDefinitionStatusActive TaskDefinitionStatus = "ACTIVE" + TaskDefinitionStatusInactive TaskDefinitionStatus = "INACTIVE" + TaskDefinitionStatusDeleteInProgress TaskDefinitionStatus = "DELETE_IN_PROGRESS" +) + +// Values returns all known values for TaskDefinitionStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TaskDefinitionStatus) Values() []TaskDefinitionStatus { + return []TaskDefinitionStatus{ + "ACTIVE", + "INACTIVE", + "DELETE_IN_PROGRESS", + } +} + +type TaskField string + +// Enum values for TaskField +const ( + TaskFieldTags TaskField = "TAGS" +) + +// Values returns all known values for TaskField. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TaskField) Values() []TaskField { + return []TaskField{ + "TAGS", + } +} + +type TaskFilesystemType string + +// Enum values for TaskFilesystemType +const ( + TaskFilesystemTypeExt3 TaskFilesystemType = "ext3" + TaskFilesystemTypeExt4 TaskFilesystemType = "ext4" + TaskFilesystemTypeXfs TaskFilesystemType = "xfs" +) + +// Values returns all known values for TaskFilesystemType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TaskFilesystemType) Values() []TaskFilesystemType { + return []TaskFilesystemType{ + "ext3", + "ext4", + "xfs", + } +} + +type TaskSetField string + +// Enum values for TaskSetField +const ( + TaskSetFieldTags TaskSetField = "TAGS" +) + +// Values returns all known values for TaskSetField. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TaskSetField) Values() []TaskSetField { + return []TaskSetField{ + "TAGS", + } +} + +type TaskStopCode string + +// Enum values for TaskStopCode +const ( + TaskStopCodeTaskFailedToStart TaskStopCode = "TaskFailedToStart" + TaskStopCodeEssentialContainerExited TaskStopCode = "EssentialContainerExited" + TaskStopCodeUserInitiated TaskStopCode = "UserInitiated" + TaskStopCodeServiceSchedulerInitiated TaskStopCode = "ServiceSchedulerInitiated" + TaskStopCodeSpotInterruption TaskStopCode = "SpotInterruption" + TaskStopCodeTerminationNotice TaskStopCode = "TerminationNotice" +) + +// Values returns all known values for TaskStopCode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TaskStopCode) Values() []TaskStopCode { + return []TaskStopCode{ + "TaskFailedToStart", + "EssentialContainerExited", + "UserInitiated", + "ServiceSchedulerInitiated", + "SpotInterruption", + "TerminationNotice", + } +} + +type TransportProtocol string + +// Enum values for TransportProtocol +const ( + TransportProtocolTcp TransportProtocol = "tcp" + TransportProtocolUdp TransportProtocol = "udp" +) + +// Values returns all known values for TransportProtocol. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TransportProtocol) Values() []TransportProtocol { + return []TransportProtocol{ + "tcp", + "udp", + } +} + +type UlimitName string + +// Enum values for UlimitName +const ( + UlimitNameCore UlimitName = "core" + UlimitNameCpu UlimitName = "cpu" + UlimitNameData UlimitName = "data" + UlimitNameFsize UlimitName = "fsize" + UlimitNameLocks UlimitName = "locks" + UlimitNameMemlock UlimitName = "memlock" + UlimitNameMsgqueue UlimitName = "msgqueue" + UlimitNameNice UlimitName = "nice" + UlimitNameNofile UlimitName = "nofile" + UlimitNameNproc UlimitName = "nproc" + UlimitNameRss UlimitName = "rss" + UlimitNameRtprio UlimitName = "rtprio" + UlimitNameRttime UlimitName = "rttime" + UlimitNameSigpending UlimitName = "sigpending" + UlimitNameStack UlimitName = "stack" +) + +// Values returns all known values for UlimitName. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (UlimitName) Values() []UlimitName { + return []UlimitName{ + "core", + "cpu", + "data", + "fsize", + "locks", + "memlock", + "msgqueue", + "nice", + "nofile", + "nproc", + "rss", + "rtprio", + "rttime", + "sigpending", + "stack", + } +} diff --git a/aws-sdk-go-v2/service/ecs/types/errors.go b/aws-sdk-go-v2/service/ecs/types/errors.go new file mode 100644 index 00000000000..a86da0e9c0f --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/types/errors.go @@ -0,0 +1,763 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// You don't have authorization to perform the requested action. +type AccessDeniedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AccessDeniedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AccessDeniedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AccessDeniedException" + } + return *e.ErrorCodeOverride +} +func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// You can apply up to 10 custom attributes for each resource. You can view the +// attributes of a resource with [ListAttributes]. You can remove existing attributes on a +// resource with [DeleteAttributes]. +// +// [ListAttributes]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListAttributes.html +// [DeleteAttributes]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteAttributes.html +type AttributeLimitExceededException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *AttributeLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AttributeLimitExceededException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AttributeLimitExceededException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AttributeLimitExceededException" + } + return *e.ErrorCodeOverride +} +func (e *AttributeLimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Your Amazon Web Services account was blocked. For more information, contact [Amazon Web Services Support]. +// +// [Amazon Web Services Support]: http://aws.amazon.com/contact-us/ +type BlockedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *BlockedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BlockedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BlockedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "BlockedException" + } + return *e.ErrorCodeOverride +} +func (e *BlockedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// These errors are usually caused by a client action. This client action might be +// using an action or resource on behalf of a user that doesn't have permissions to +// use the action or resource. Or, it might be specifying an identifier that isn't +// valid. 
+// +// The following list includes additional causes for the error: +// +// - The RunTask could not be processed because you use managed scaling and there +// is a capacity error because the quota of tasks in the PROVISIONING per cluster +// has been reached. For information about the service quotas, see [Amazon ECS service quotas]. +// +// [Amazon ECS service quotas]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-quotas.html +type ClientException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ClientException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ClientException" + } + return *e.ErrorCodeOverride +} +func (e *ClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// You can't delete a cluster that has registered container instances. First, +// deregister the container instances before you can delete the cluster. For more +// information, see [DeregisterContainerInstance]. +// +// [DeregisterContainerInstance]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeregisterContainerInstance.html +type ClusterContainsContainerInstancesException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ClusterContainsContainerInstancesException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ClusterContainsContainerInstancesException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ClusterContainsContainerInstancesException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ClusterContainsContainerInstancesException" + } + return *e.ErrorCodeOverride +} +func (e *ClusterContainsContainerInstancesException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// You can't delete a cluster that contains services. First, update the service to +// reduce its desired task count to 0, and then delete the service. For more +// information, see [UpdateService]and [DeleteService]. +// +// [UpdateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateService.html +// [DeleteService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteService.html +type ClusterContainsServicesException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ClusterContainsServicesException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ClusterContainsServicesException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ClusterContainsServicesException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ClusterContainsServicesException" + } + return *e.ErrorCodeOverride +} +func (e *ClusterContainsServicesException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// You can't delete a cluster that has active tasks. 
+type ClusterContainsTasksException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ClusterContainsTasksException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ClusterContainsTasksException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ClusterContainsTasksException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ClusterContainsTasksException" + } + return *e.ErrorCodeOverride +} +func (e *ClusterContainsTasksException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified cluster wasn't found. You can view your available clusters with [ListClusters]. +// Amazon ECS clusters are Region specific. +// +// [ListClusters]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListClusters.html +type ClusterNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ClusterNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ClusterNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ClusterNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ClusterNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ClusterNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The RunTask request could not be processed due to conflicts. The provided +// clientToken is already in use with a different RunTask request. The resourceIds +// are the existing task ARNs which are already associated with the clientToken . +// +// To fix this issue: +// +// - Run RunTask with a unique clientToken . +// +// - Run RunTask with the clientToken and the original set of parameters +type ConflictException struct { + Message *string + + ErrorCodeOverride *string + + ResourceIds []string + + noSmithyDocumentSerde +} + +func (e *ConflictException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ConflictException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ConflictException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ConflictException" + } + return *e.ErrorCodeOverride +} +func (e *ConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified parameter isn't valid. Review the available parameters for the +// API request. +type InvalidParameterException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidParameterException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidParameterException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidParameterException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidParameterException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The limit for the resource was exceeded. 
+type LimitExceededException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LimitExceededException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LimitExceededException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LimitExceededException" + } + return *e.ErrorCodeOverride +} +func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Amazon ECS can't determine the current version of the Amazon ECS container +// agent on the container instance and doesn't have enough information to proceed +// with an update. This could be because the agent running on the container +// instance is a previous or custom version that doesn't use our version +// information. +type MissingVersionException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *MissingVersionException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *MissingVersionException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *MissingVersionException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "MissingVersionException" + } + return *e.ErrorCodeOverride +} +func (e *MissingVersionException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified namespace wasn't found. +type NamespaceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *NamespaceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NamespaceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NamespaceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "NamespaceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *NamespaceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// There's no update available for this Amazon ECS container agent. This might be +// because the agent is already running the latest version or because it's so old +// that there's no update path to the current version. +type NoUpdateAvailableException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *NoUpdateAvailableException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NoUpdateAvailableException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NoUpdateAvailableException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "NoUpdateAvailableException" + } + return *e.ErrorCodeOverride +} +func (e *NoUpdateAvailableException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified platform version doesn't satisfy the required capabilities of the +// task definition. 
+type PlatformTaskDefinitionIncompatibilityException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PlatformTaskDefinitionIncompatibilityException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PlatformTaskDefinitionIncompatibilityException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PlatformTaskDefinitionIncompatibilityException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PlatformTaskDefinitionIncompatibilityException" + } + return *e.ErrorCodeOverride +} +func (e *PlatformTaskDefinitionIncompatibilityException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified platform version doesn't exist. +type PlatformUnknownException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PlatformUnknownException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PlatformUnknownException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PlatformUnknownException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PlatformUnknownException" + } + return *e.ErrorCodeOverride +} +func (e *PlatformUnknownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified resource is in-use and can't be removed. +type ResourceInUseException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ResourceInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceInUseException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ResourceInUseException" + } + return *e.ErrorCodeOverride +} +func (e *ResourceInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified resource wasn't found. +type ResourceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ResourceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// These errors are usually caused by a server issue. +type ServerException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ServerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ServerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ServerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ServerException" + } + return *e.ErrorCodeOverride +} +func (e *ServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The specified service isn't active. You can't update a service that's inactive. 
+// If you have previously deleted a service, you can re-create it with [CreateService]. +// +// [CreateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html +type ServiceNotActiveException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ServiceNotActiveException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ServiceNotActiveException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ServiceNotActiveException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ServiceNotActiveException" + } + return *e.ErrorCodeOverride +} +func (e *ServiceNotActiveException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified service wasn't found. You can view your available services with [ListServices]. +// Amazon ECS services are cluster specific and Region specific. +// +// [ListServices]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListServices.html +type ServiceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ServiceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ServiceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ServiceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ServiceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ServiceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The execute command cannot run. This error can be caused by any of the +// following configuration issues: +// +// - Incorrect IAM permissions +// +// - The SSM agent is not installed or is not running +// +// - There is an interface Amazon VPC endpoint for Amazon ECS, but there is not +// one for Systems Manager Session Manager +// +// For information about how to troubleshoot the issues, see [Troubleshooting issues with ECS Exec] in the Amazon +// Elastic Container Service Developer Guide. +// +// [Troubleshooting issues with ECS Exec]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-exec.html +type TargetNotConnectedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TargetNotConnectedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TargetNotConnectedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TargetNotConnectedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TargetNotConnectedException" + } + return *e.ErrorCodeOverride +} +func (e *TargetNotConnectedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified target wasn't found. You can view your available container +// instances with [ListContainerInstances]. Amazon ECS container instances are cluster-specific and +// Region-specific. 
+// +// [ListContainerInstances]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListContainerInstances.html +type TargetNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TargetNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TargetNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TargetNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TargetNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *TargetNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified task set wasn't found. You can view your available task sets with [DescribeTaskSets] +// . Task sets are specific to each cluster, service and Region. +// +// [DescribeTaskSets]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTaskSets.html +type TaskSetNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TaskSetNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TaskSetNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TaskSetNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TaskSetNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *TaskSetNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified task isn't supported in this Region. +type UnsupportedFeatureException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedFeatureException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedFeatureException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedFeatureException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedFeatureException" + } + return *e.ErrorCodeOverride +} +func (e *UnsupportedFeatureException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// There's already a current Amazon ECS container agent update in progress on the +// container instance that's specified. If the container agent becomes disconnected +// while it's in a transitional stage, such as PENDING or STAGING , the update +// process can get stuck in that state. However, when the agent reconnects, it +// resumes where it stopped previously. 
+type UpdateInProgressException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UpdateInProgressException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UpdateInProgressException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UpdateInProgressException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UpdateInProgressException" + } + return *e.ErrorCodeOverride +} +func (e *UpdateInProgressException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/aws-sdk-go-v2/service/ecs/types/types.go b/aws-sdk-go-v2/service/ecs/types/types.go new file mode 100644 index 00000000000..be3c6d06ae8 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/types/types.go @@ -0,0 +1,5597 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// An object representing a container instance or task attachment. +type Attachment struct { + + // Details of the attachment. + // + // For elastic network interfaces, this includes the network interface ID, the MAC + // address, the subnet ID, and the private IPv4 address. + // + // For Service Connect services, this includes portName , clientAliases , + // discoveryName , and ingressPortOverride . + // + // For Elastic Block Storage, this includes roleArn , deleteOnTermination , + // volumeName , volumeId , and statusReason (only when the attachment fails to + // create or attach). + Details []KeyValuePair + + // The unique identifier for the attachment. + Id *string + + // The status of the attachment. Valid values are PRECREATED , CREATED , ATTACHING + // , ATTACHED , DETACHING , DETACHED , DELETED , and FAILED . + Status *string + + // The type of the attachment, such as ElasticNetworkInterface , Service Connect , + // and AmazonElasticBlockStorage . + Type *string + + noSmithyDocumentSerde +} + +// An object representing a change in state for a task attachment. +type AttachmentStateChange struct { + + // The Amazon Resource Name (ARN) of the attachment. + // + // This member is required. + AttachmentArn *string + + // The status of the attachment. + // + // This member is required. + Status *string + + noSmithyDocumentSerde +} + +// An attribute is a name-value pair that's associated with an Amazon ECS object. +// Use attributes to extend the Amazon ECS data model by adding custom metadata to +// your resources. For more information, see [Attributes]in the Amazon Elastic Container +// Service Developer Guide. +// +// [Attributes]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes +type Attribute struct { + + // The name of the attribute. The name must contain between 1 and 128 characters. + // The name may contain letters (uppercase and lowercase), numbers, hyphens (-), + // underscores (_), forward slashes (/), back slashes (\), or periods (.). + // + // This member is required. + Name *string + + // The ID of the target. You can specify the short form ID for a resource or the + // full Amazon Resource Name (ARN). + TargetId *string + + // The type of the target to attach the attribute with. This parameter is required + // if you use the short form ID for a resource instead of the full ARN. + TargetType TargetType + + // The value of the attribute. The value must contain between 1 and 128 + // characters. 
It can contain letters (uppercase and lowercase), numbers, hyphens + // (-), underscores (_), periods (.), at signs (@), forward slashes (/), back + // slashes (\), colons (:), or spaces. The value can't start or end with a space. + Value *string + + noSmithyDocumentSerde +} + +// The details of the Auto Scaling group for the capacity provider. +type AutoScalingGroupProvider struct { + + // The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the + // Auto Scaling group name. + // + // This member is required. + AutoScalingGroupArn *string + + // The managed draining option for the Auto Scaling group capacity provider. When + // you enable this, Amazon ECS manages and gracefully drains the EC2 container + // instances that are in the Auto Scaling group capacity provider. + ManagedDraining ManagedDraining + + // The managed scaling settings for the Auto Scaling group capacity provider. + ManagedScaling *ManagedScaling + + // The managed termination protection setting to use for the Auto Scaling group + // capacity provider. This determines whether the Auto Scaling group has managed + // termination protection. The default is off. + // + // When using managed termination protection, managed scaling must also be used + // otherwise managed termination protection doesn't work. + // + // When managed termination protection is on, Amazon ECS prevents the Amazon EC2 + // instances in an Auto Scaling group that contain tasks from being terminated + // during a scale-in action. The Auto Scaling group and each instance in the Auto + // Scaling group must have instance protection from scale-in actions on as well. + // For more information, see [Instance Protection]in the Auto Scaling User Guide. + // + // When managed termination protection is off, your Amazon EC2 instances aren't + // protected from termination when the Auto Scaling group scales in. + // + // [Instance Protection]: https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection + ManagedTerminationProtection ManagedTerminationProtection + + noSmithyDocumentSerde +} + +// The details of the Auto Scaling group capacity provider to update. +type AutoScalingGroupProviderUpdate struct { + + // The managed draining option for the Auto Scaling group capacity provider. When + // you enable this, Amazon ECS manages and gracefully drains the EC2 container + // instances that are in the Auto Scaling group capacity provider. + ManagedDraining ManagedDraining + + // The managed scaling settings for the Auto Scaling group capacity provider. + ManagedScaling *ManagedScaling + + // The managed termination protection setting to use for the Auto Scaling group + // capacity provider. This determines whether the Auto Scaling group has managed + // termination protection. + // + // When using managed termination protection, managed scaling must also be used + // otherwise managed termination protection doesn't work. + // + // When managed termination protection is on, Amazon ECS prevents the Amazon EC2 + // instances in an Auto Scaling group that contain tasks from being terminated + // during a scale-in action. The Auto Scaling group and each instance in the Auto + // Scaling group must have instance protection from scale-in actions on. For more + // information, see [Instance Protection]in the Auto Scaling User Guide. + // + // When managed termination protection is off, your Amazon EC2 instances aren't + // protected from termination when the Auto Scaling group scales in. 
+ // + // [Instance Protection]: https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection + ManagedTerminationProtection ManagedTerminationProtection + + noSmithyDocumentSerde +} + +// An object representing the networking details for a task or service. For +// example +// awsVpcConfiguration={subnets=["subnet-12344321"],securityGroups=["sg-12344321"]} +// . +type AwsVpcConfiguration struct { + + // The IDs of the subnets associated with the task or service. There's a limit of + // 16 subnets that can be specified per awsvpcConfiguration . + // + // All specified subnets must be from the same VPC. + // + // This member is required. + Subnets []string + + // Whether the task's elastic network interface receives a public IP address. The + // default value is DISABLED . + AssignPublicIp AssignPublicIp + + // The IDs of the security groups associated with the task or service. If you + // don't specify a security group, the default security group for the VPC is used. + // There's a limit of 5 security groups that can be specified per + // awsvpcConfiguration . + // + // All specified security groups must be from the same VPC. + SecurityGroups []string + + noSmithyDocumentSerde +} + +// The details for a capacity provider. +type CapacityProvider struct { + + // The Auto Scaling group settings for the capacity provider. + AutoScalingGroupProvider *AutoScalingGroupProvider + + // The Amazon Resource Name (ARN) that identifies the capacity provider. + CapacityProviderArn *string + + // The name of the capacity provider. + Name *string + + // The current status of the capacity provider. Only capacity providers in an + // ACTIVE state can be used in a cluster. When a capacity provider is successfully + // deleted, it has an INACTIVE status. + Status CapacityProviderStatus + + // The metadata that you apply to the capacity provider to help you categorize and + // organize it. Each tag consists of a key and an optional value. You define both. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []Tag + + // The update status of the capacity provider. The following are the possible + // states that is returned. + // + // DELETE_IN_PROGRESS The capacity provider is in the process of being deleted. + // + // DELETE_COMPLETE The capacity provider was successfully deleted and has an + // INACTIVE status. + // + // DELETE_FAILED The capacity provider can't be deleted. The update status reason + // provides further details about why the delete failed. 
+ UpdateStatus CapacityProviderUpdateStatus + + // The update status reason. This provides further details about the update status + // for the capacity provider. + UpdateStatusReason *string + + noSmithyDocumentSerde +} + +// The details of a capacity provider strategy. A capacity provider strategy can +// be set when using the [RunTask]or [CreateCluster] APIs or as the default capacity provider strategy for +// a cluster with the CreateCluster API. +// +// Only capacity providers that are already associated with a cluster and have an +// ACTIVE or UPDATING status can be used in a capacity provider strategy. The [PutClusterCapacityProviders] API +// is used to associate a capacity provider with a cluster. +// +// If specifying a capacity provider that uses an Auto Scaling group, the capacity +// provider must already be created. New Auto Scaling group capacity providers can +// be created with the [CreateClusterCapacityProvider]API operation. +// +// To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT +// capacity providers. The Fargate capacity providers are available to all accounts +// and only need to be associated with a cluster to be used in a capacity provider +// strategy. +// +// With FARGATE_SPOT , you can run interruption tolerant tasks at a rate that's +// discounted compared to the FARGATE price. FARGATE_SPOT runs tasks on spare +// compute capacity. When Amazon Web Services needs the capacity back, your tasks +// are interrupted with a two-minute warning. FARGATE_SPOT supports Linux tasks +// with the X86_64 architecture on platform version 1.3.0 or later. FARGATE_SPOT +// supports Linux tasks with the ARM64 architecture on platform version 1.4.0 or +// later. +// +// A capacity provider strategy may contain a maximum of 6 capacity providers. +// +// [PutClusterCapacityProviders]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutClusterCapacityProviders.html +// [RunTask]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html +// [CreateClusterCapacityProvider]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateClusterCapacityProvider.html +// [CreateCluster]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCluster.html +type CapacityProviderStrategyItem struct { + + // The short name of the capacity provider. + // + // This member is required. + CapacityProvider *string + + // The base value designates how many tasks, at a minimum, to run on the specified + // capacity provider. Only one capacity provider in a capacity provider strategy + // can have a base defined. If no value is specified, the default value of 0 is + // used. + Base int32 + + // The weight value designates the relative percentage of the total number of + // tasks launched that should use the specified capacity provider. The weight + // value is taken into consideration after the base value, if defined, is + // satisfied. + // + // If no weight value is specified, the default value of 0 is used. When multiple + // capacity providers are specified within a capacity provider strategy, at least + // one of the capacity providers must have a weight value greater than zero and any + // capacity providers with a weight of 0 can't be used to place tasks. If you + // specify multiple capacity providers in a strategy that all have a weight of 0 , + // any RunTask or CreateService actions using the capacity provider strategy will + // fail. 
+ // + // An example scenario for using weights is defining a strategy that contains two + // capacity providers and both have a weight of 1 , then when the base is + // satisfied, the tasks will be split evenly across the two capacity providers. + // Using that same logic, if you specify a weight of 1 for capacityProviderA and a + // weight of 4 for capacityProviderB, then for every one task that's run using + // capacityProviderA, four tasks would use capacityProviderB. + Weight int32 + + noSmithyDocumentSerde +} + +// A regional grouping of one or more container instances where you can run task +// requests. Each account receives a default cluster the first time you use the +// Amazon ECS service, but you may also create other clusters. Clusters may contain +// more than one instance type simultaneously. +type Cluster struct { + + // The number of services that are running on the cluster in an ACTIVE state. You + // can view these services with [PListServices]. + // + // [PListServices]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListServices.html + ActiveServicesCount int32 + + // The resources attached to a cluster. When using a capacity provider with a + // cluster, the capacity provider and associated resources are returned as cluster + // attachments. + Attachments []Attachment + + // The status of the capacity providers associated with the cluster. The following + // are the states that are returned. + // + // UPDATE_IN_PROGRESS The available capacity providers for the cluster are + // updating. + // + // UPDATE_COMPLETE The capacity providers have successfully updated. + // + // UPDATE_FAILED The capacity provider updates failed. + AttachmentsStatus *string + + // The capacity providers associated with the cluster. + CapacityProviders []string + + // The Amazon Resource Name (ARN) that identifies the cluster. For more + // information about the ARN format, see [Amazon Resource Name (ARN)]in the Amazon ECS Developer Guide. + // + // [Amazon Resource Name (ARN)]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + ClusterArn *string + + // A user-generated string that you use to identify your cluster. + ClusterName *string + + // The execute command configuration for the cluster. + Configuration *ClusterConfiguration + + // The default capacity provider strategy for the cluster. When services or tasks + // are run in the cluster with no launch type or capacity provider strategy + // specified, the default capacity provider strategy is used. + DefaultCapacityProviderStrategy []CapacityProviderStrategyItem + + // The number of tasks in the cluster that are in the PENDING state. + PendingTasksCount int32 + + // The number of container instances registered into the cluster. This includes + // container instances in both ACTIVE and DRAINING status. + RegisteredContainerInstancesCount int32 + + // The number of tasks in the cluster that are in the RUNNING state. + RunningTasksCount int32 + + // Use this parameter to set a default Service Connect namespace. After you set a + // default Service Connect namespace, any new services with Service Connect turned + // on that are created in the cluster are added as client services in the + // namespace. This setting only applies to new services that set the enabled + // parameter to true in the ServiceConnectConfiguration . You can set the namespace + // of each service individually in the ServiceConnectConfiguration to override + // this default parameter. 
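A minimal sketch of the weighted strategy scenario described above (illustrative only; the capacity provider names and the base value are placeholders, and aws.String is the pointer helper from the SDK's aws package).

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

// After the base of 1 task is satisfied, one task runs on capacityProviderA
// for every four tasks placed on capacityProviderB. Only one item in a
// strategy can define a base.
var strategy = []types.CapacityProviderStrategyItem{
	{CapacityProvider: aws.String("capacityProviderA"), Base: 1, Weight: 1},
	{CapacityProvider: aws.String("capacityProviderB"), Weight: 4},
}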
+ // + // Tasks that run in a namespace can use short names to connect to services in the + // namespace. Tasks can connect to services across all of the clusters in the + // namespace. Tasks connect through a managed proxy container that collects logs + // and metrics for increased visibility. Only the tasks that Amazon ECS services + // create are supported with Service Connect. For more information, see [Service Connect]in the + // Amazon Elastic Container Service Developer Guide. + // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + ServiceConnectDefaults *ClusterServiceConnectDefaults + + // The settings for the cluster. This parameter indicates whether CloudWatch + // Container Insights is on or off for a cluster. + Settings []ClusterSetting + + // Additional information about your clusters that are separated by launch type. + // They include the following: + // + // - runningEC2TasksCount + // + // - RunningFargateTasksCount + // + // - pendingEC2TasksCount + // + // - pendingFargateTasksCount + // + // - activeEC2ServiceCount + // + // - activeFargateServiceCount + // + // - drainingEC2ServiceCount + // + // - drainingFargateServiceCount + Statistics []KeyValuePair + + // The status of the cluster. The following are the possible states that are + // returned. + // + // ACTIVE The cluster is ready to accept tasks and if applicable you can register + // container instances with the cluster. + // + // PROVISIONING The cluster has capacity providers that are associated with it and + // the resources needed for the capacity provider are being created. + // + // DEPROVISIONING The cluster has capacity providers that are associated with it + // and the resources needed for the capacity provider are being deleted. + // + // FAILED The cluster has capacity providers that are associated with it and the + // resources needed for the capacity provider have failed to create. + // + // INACTIVE The cluster has been deleted. Clusters with an INACTIVE status may + // remain discoverable in your account for a period of time. However, this behavior + // is subject to change in the future. We don't recommend that you rely on INACTIVE + // clusters persisting. + Status *string + + // The metadata that you apply to the cluster to help you categorize and organize + // them. Each tag consists of a key and an optional value. You define both. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. 
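The tag restrictions above can be illustrated with a short sketch (not part of the generated file; the key and value strings are placeholders).

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

// Keys and values are case-sensitive; avoid the reserved aws:/AWS: prefixes.
var clusterTags = []types.Tag{
	{Key: aws.String("team"), Value: aws.String("platform")},
	{Key: aws.String("environment"), Value: aws.String("staging")},
}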
+ Tags []Tag + + noSmithyDocumentSerde +} + +// The execute command and managed storage configuration for the cluster. +type ClusterConfiguration struct { + + // The details of the execute command configuration. + ExecuteCommandConfiguration *ExecuteCommandConfiguration + + // The details of the managed storage configuration. + ManagedStorageConfiguration *ManagedStorageConfiguration + + noSmithyDocumentSerde +} + +// Use this parameter to set a default Service Connect namespace. After you set a +// default Service Connect namespace, any new services with Service Connect turned +// on that are created in the cluster are added as client services in the +// namespace. This setting only applies to new services that set the enabled +// parameter to true in the ServiceConnectConfiguration . You can set the namespace +// of each service individually in the ServiceConnectConfiguration to override +// this default parameter. +// +// Tasks that run in a namespace can use short names to connect to services in the +// namespace. Tasks can connect to services across all of the clusters in the +// namespace. Tasks connect through a managed proxy container that collects logs +// and metrics for increased visibility. Only the tasks that Amazon ECS services +// create are supported with Service Connect. For more information, see [Service Connect]in the +// Amazon Elastic Container Service Developer Guide. +// +// [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html +type ClusterServiceConnectDefaults struct { + + // The namespace name or full Amazon Resource Name (ARN) of the Cloud Map + // namespace. When you create a service and don't specify a Service Connect + // configuration, this namespace is used. + Namespace *string + + noSmithyDocumentSerde +} + +// Use this parameter to set a default Service Connect namespace. After you set a +// default Service Connect namespace, any new services with Service Connect turned +// on that are created in the cluster are added as client services in the +// namespace. This setting only applies to new services that set the enabled +// parameter to true in the ServiceConnectConfiguration . You can set the namespace +// of each service individually in the ServiceConnectConfiguration to override +// this default parameter. +// +// Tasks that run in a namespace can use short names to connect to services in the +// namespace. Tasks can connect to services across all of the clusters in the +// namespace. Tasks connect through a managed proxy container that collects logs +// and metrics for increased visibility. Only the tasks that Amazon ECS services +// create are supported with Service Connect. For more information, see [Service Connect]in the +// Amazon Elastic Container Service Developer Guide. +// +// [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html +type ClusterServiceConnectDefaultsRequest struct { + + // The namespace name or full Amazon Resource Name (ARN) of the Cloud Map + // namespace that's used when you create a service and don't specify a Service + // Connect configuration. The namespace name can include up to 1024 characters. The + // name is case-sensitive. The name can't include hyphens (-), tilde (~), greater + // than (>), less than (<), or slash (/). + // + // If you enter an existing namespace name or ARN, then that namespace will be + // used. Any namespace type is supported. The namespace must be in this account and + // this Amazon Web Services Region. 
+ // + // If you enter a new name, a Cloud Map namespace will be created. Amazon ECS + // creates a Cloud Map namespace with the "API calls" method of instance discovery + // only. This instance discovery method is the "HTTP" namespace type in the Command + // Line Interface. Other types of instance discovery aren't used by Service + // Connect. + // + // If you update the cluster with an empty string "" for the namespace name, the + // cluster configuration for Service Connect is removed. Note that the namespace + // will remain in Cloud Map and must be deleted separately. + // + // For more information about Cloud Map, see [Working with Services] in the Cloud Map Developer Guide. + // + // [Working with Services]: https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html + // + // This member is required. + Namespace *string + + noSmithyDocumentSerde +} + +// The settings to use when creating a cluster. This parameter is used to turn on +// CloudWatch Container Insights for a cluster. +type ClusterSetting struct { + + // The name of the cluster setting. The value is containerInsights . + Name ClusterSettingName + + // The value to set for the cluster setting. The supported values are enabled and + // disabled . + // + // If you set name to containerInsights and value to enabled , CloudWatch Container + // Insights will be on for the cluster, otherwise it will be off unless the + // containerInsights account setting is turned on. If a cluster value is specified, + // it will override the containerInsights value set with [PutAccountSetting] or [PutAccountSettingDefault]. + // + // [PutAccountSettingDefault]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html + // [PutAccountSetting]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html + Value *string + + noSmithyDocumentSerde +} + +// A Docker container that's part of a task. +type Container struct { + + // The Amazon Resource Name (ARN) of the container. + ContainerArn *string + + // The number of CPU units set for the container. The value is 0 if no value was + // specified in the container definition when the task definition was registered. + Cpu *string + + // The exit code returned from the container. + ExitCode *int32 + + // The IDs of each GPU assigned to the container. + GpuIds []string + + // The health status of the container. If health checks aren't configured for this + // container in its task definition, then it reports the health status as UNKNOWN . + HealthStatus HealthStatus + + // The image used for the container. + Image *string + + // The container image manifest digest. + ImageDigest *string + + // The last known status of the container. + LastStatus *string + + // The details of any Amazon ECS managed agents associated with the container. + ManagedAgents []ManagedAgent + + // The hard limit (in MiB) of memory set for the container. + Memory *string + + // The soft limit (in MiB) of memory set for the container. + MemoryReservation *string + + // The name of the container. + Name *string + + // The network bindings associated with the container. + NetworkBindings []NetworkBinding + + // The network interfaces associated with the container. + NetworkInterfaces []NetworkInterface + + // A short (255 max characters) human-readable string to provide additional + // details about a running or stopped container. + Reason *string + + // The ID of the Docker container. + RuntimeId *string + + // The ARN of the task. 
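A minimal sketch of turning on CloudWatch Container Insights through the cluster setting described above (illustrative only; this value overrides the account-level containerInsights setting for the cluster it is applied to).

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

// The only supported setting name is containerInsights; the value is
// "enabled" or "disabled".
var insightsOn = types.ClusterSetting{
	Name:  types.ClusterSettingNameContainerInsights,
	Value: aws.String("enabled"),
}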
+ TaskArn *string + + noSmithyDocumentSerde +} + +// Container definitions are used in task definitions to describe the different +// containers that are launched as part of a task. +type ContainerDefinition struct { + + // The command that's passed to the container. This parameter maps to Cmd in the + // docker container create command and the COMMAND parameter to docker run. If + // there are multiple arguments, each argument is a separated string in the array. + Command []string + + // The number of cpu units reserved for the container. This parameter maps to + // CpuShares in the docker container create command and the --cpu-shares option to + // docker run. + // + // This field is optional for tasks using the Fargate launch type, and the only + // requirement is that the total amount of CPU reserved for all containers within a + // task be lower than the task-level cpu value. + // + // You can determine the number of CPU units that are available per EC2 instance + // type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances]detail page + // by 1,024. + // + // Linux containers share unallocated CPU units with other containers on the + // container instance with the same ratio as their allocated amount. For example, + // if you run a single-container task on a single-core instance type with 512 CPU + // units specified for that container, and that's the only task running on the + // container instance, that container could use the full 1,024 CPU unit share at + // any given time. However, if you launched another copy of the same task on that + // container instance, each task is guaranteed a minimum of 512 CPU units when + // needed. Moreover, each container could float to higher CPU usage if the other + // container was not using it. If both tasks were 100% active all of the time, they + // would be limited to 512 CPU units. + // + // On Linux container instances, the Docker daemon on the container instance uses + // the CPU value to calculate the relative CPU share ratios for running containers. + // The minimum valid CPU share value that the Linux kernel allows is 2, and the + // maximum valid CPU share value that the Linux kernel allows is 262144. However, + // the CPU parameter isn't required, and you can use CPU values below 2 or above + // 262144 in your container definitions. For CPU values below 2 (including null) or + // above 262144, the behavior varies based on your Amazon ECS container agent + // version: + // + // - Agent versions less than or equal to 1.1.0: Null and zero CPU values are + // passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU + // values of 1 are passed to Docker as 1, which the Linux kernel converts to two + // CPU shares. + // + // - Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values + // of 1 are passed to Docker as 2. + // + // - Agent versions greater than or equal to 1.84.0: CPU values greater than 256 + // vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares. + // + // On Windows container instances, the CPU limit is enforced as an absolute limit, + // or a quota. Windows containers only have access to the specified amount of CPU + // that's described in the task definition. A null or zero CPU value is passed to + // Docker as 0 , which Windows interprets as 1% of one CPU. 
+ // + // [Amazon EC2 Instances]: http://aws.amazon.com/ec2/instance-types/ + Cpu int32 + + // A list of ARNs in SSM or Amazon S3 to a credential spec ( CredSpec ) file that + // configures the container for Active Directory authentication. We recommend that + // you use this parameter instead of the dockerSecurityOptions . The maximum number + // of ARNs is 1. + // + // There are two formats for each ARN. + // + // credentialspecdomainless:MyARN You use credentialspecdomainless:MyARN to + // provide a CredSpec with an additional section for a secret in Secrets Manager. + // You provide the login credentials to the domain in the secret. + // + // Each task that runs on any container instance can join different domains. + // + // You can use this format without joining the container instance to a domain. + // + // credentialspec:MyARN You use credentialspec:MyARN to provide a CredSpec for a + // single domain. + // + // You must join the container instance to the domain before you start any tasks + // that use this task definition. + // + // In both formats, replace MyARN with the ARN in SSM or Amazon S3. + // + // If you provide a credentialspecdomainless:MyARN , the credspec must provide an + // ARN in Secrets Manager for a secret containing the username, password, and the + // domain to connect to. For better security, the instance isn't joined to the + // domain for domainless authentication. Other applications on the instance can't + // use the domainless credentials. You can use this parameter to run tasks on the + // same instance, even if the tasks need to join different domains. For more + // information, see [Using gMSAs for Windows Containers]and [Using gMSAs for Linux Containers]. + // + // [Using gMSAs for Windows Containers]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html + // [Using gMSAs for Linux Containers]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html + CredentialSpecs []string + + // The dependencies defined for container startup and shutdown. A container can + // contain multiple dependencies on other containers in a task definition. When a + // dependency is defined for container startup, for container shutdown it is + // reversed. + // + // For tasks using the EC2 launch type, the container instances require at least + // version 1.26.0 of the container agent to turn on container dependencies. + // However, we recommend using the latest container agent version. For information + // about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent]in + // the Amazon Elastic Container Service Developer Guide. If you're using an Amazon + // ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the + // ecs-init package. If your container instances are launched from version 20190301 + // or later, then they contain the required versions of the container agent and + // ecs-init . For more information, see [Amazon ECS-optimized Linux AMI] in the Amazon Elastic Container Service + // Developer Guide. + // + // For tasks using the Fargate launch type, the task or service requires the + // following platforms: + // + // - Linux platform version 1.3.0 or later. + // + // - Windows platform version 1.0.0 or later. 
+ // + // [Updating the Amazon ECS Container Agent]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html + // [Amazon ECS-optimized Linux AMI]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html + DependsOn []ContainerDependency + + // When this parameter is true, networking is off within the container. This + // parameter maps to NetworkDisabled in the docker container create command. + // + // This parameter is not supported for Windows containers. + DisableNetworking *bool + + // A list of DNS search domains that are presented to the container. This + // parameter maps to DnsSearch in the docker container create command and the + // --dns-search option to docker run. + // + // This parameter is not supported for Windows containers. + DnsSearchDomains []string + + // A list of DNS servers that are presented to the container. This parameter maps + // to Dns in the docker container create command and the --dns option to docker + // run. + // + // This parameter is not supported for Windows containers. + DnsServers []string + + // A key/value map of labels to add to the container. This parameter maps to Labels + // in the docker container create command and the --label option to docker run. + // This parameter requires version 1.18 of the Docker Remote API or greater on your + // container instance. To check the Docker Remote API version on your container + // instance, log in to your container instance and run the following command: sudo + // docker version --format '{{.Server.APIVersion}}' + DockerLabels map[string]string + + // A list of strings to provide custom configuration for multiple security + // systems. This field isn't valid for containers in tasks using the Fargate launch + // type. + // + // For Linux tasks on EC2, this parameter can be used to reference custom labels + // for SELinux and AppArmor multi-level security systems. + // + // For any tasks on EC2, this parameter can be used to reference a credential spec + // file that configures a container for Active Directory authentication. For more + // information, see [Using gMSAs for Windows Containers]and [Using gMSAs for Linux Containers] in the Amazon Elastic Container Service Developer Guide. + // + // This parameter maps to SecurityOpt in the docker container create command and + // the --security-opt option to docker run. + // + // The Amazon ECS container agent running on a container instance must register + // with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment + // variables before containers placed on that instance can use these security + // options. For more information, see [Amazon ECS Container Agent Configuration]in the Amazon Elastic Container Service + // Developer Guide. + // + // Valid values: "no-new-privileges" | "apparmor:PROFILE" | "label:value" | + // "credentialspec:CredentialSpecFilePath" + // + // [Using gMSAs for Windows Containers]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html + // [Using gMSAs for Linux Containers]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html + // [Amazon ECS Container Agent Configuration]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html + DockerSecurityOptions []string + + // Early versions of the Amazon ECS container agent don't properly handle + // entryPoint parameters. 
If you have problems using entryPoint , update your + // container agent or enter your commands and arguments as command array items + // instead. + // + // The entry point that's passed to the container. This parameter maps to + // Entrypoint in the docker container create command and the --entrypoint option + // to docker run. + EntryPoint []string + + // The environment variables to pass to a container. This parameter maps to Env in + // the docker container create command and the --env option to docker run. + // + // We don't recommend that you use plaintext environment variables for sensitive + // information, such as credential data. + Environment []KeyValuePair + + // A list of files containing the environment variables to pass to a container. + // This parameter maps to the --env-file option to docker run. + // + // You can specify up to ten environment files. The file must have a .env file + // extension. Each line in an environment file contains an environment variable in + // VARIABLE=VALUE format. Lines beginning with # are treated as comments and are + // ignored. + // + // If there are environment variables specified using the environment parameter in + // a container definition, they take precedence over the variables contained within + // an environment file. If multiple environment files are specified that contain + // the same variable, they're processed from the top down. We recommend that you + // use unique variable names. For more information, see [Specifying Environment Variables]in the Amazon Elastic + // Container Service Developer Guide. + // + // [Specifying Environment Variables]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html + EnvironmentFiles []EnvironmentFile + + // If the essential parameter of a container is marked as true , and that container + // fails or stops for any reason, all other containers that are part of the task + // are stopped. If the essential parameter of a container is marked as false , its + // failure doesn't affect the rest of the containers in a task. If this parameter + // is omitted, a container is assumed to be essential. + // + // All tasks must have at least one essential container. If you have an + // application that's composed of multiple containers, group containers that are + // used for a common purpose into components, and separate the different components + // into multiple task definitions. For more information, see [Application Architecture]in the Amazon Elastic + // Container Service Developer Guide. + // + // [Application Architecture]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html + Essential *bool + + // A list of hostnames and IP address mappings to append to the /etc/hosts file on + // the container. This parameter maps to ExtraHosts in the docker container create + // command and the --add-host option to docker run. + // + // This parameter isn't supported for Windows containers or tasks that use the + // awsvpc network mode. + ExtraHosts []HostEntry + + // The FireLens configuration for the container. This is used to specify and + // configure a log router for container logs. For more information, see [Custom Log Routing]in the + // Amazon Elastic Container Service Developer Guide. 
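A minimal sketch of the environment and essential settings described above for a single container definition (illustrative only; the container name, image, and variable values are placeholders).

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

// Plaintext environment variables shouldn't carry credentials; use the
// Secrets field for sensitive data.
var webContainer = types.ContainerDefinition{
	Name:      aws.String("web"),
	Image:     aws.String("public.ecr.aws/nginx/nginx:latest"),
	Essential: aws.Bool(true), // if this container stops, the task stops
	Environment: []types.KeyValuePair{
		{Name: aws.String("APP_ENV"), Value: aws.String("staging")},
	},
}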
+ // + // [Custom Log Routing]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html + FirelensConfiguration *FirelensConfiguration + + // The container health check command and associated configuration parameters for + // the container. This parameter maps to HealthCheck in the docker container + // create command and the HEALTHCHECK parameter of docker run. + HealthCheck *HealthCheck + + // The hostname to use for your container. This parameter maps to Hostname in the + // docker container create command and the --hostname option to docker run. + // + // The hostname parameter is not supported if you're using the awsvpc network mode. + Hostname *string + + // The image used to start a container. This string is passed directly to the + // Docker daemon. By default, images in the Docker Hub registry are available. + // Other repositories are specified with either repository-url/image:tag or + // repository-url/image@digest . Up to 255 letters (uppercase and lowercase), + // numbers, hyphens, underscores, colons, periods, forward slashes, and number + // signs are allowed. This parameter maps to Image in the docker container create + // command and the IMAGE parameter of docker run. + // + // - When a new task starts, the Amazon ECS container agent pulls the latest + // version of the specified image and tag for the container to use. However, + // subsequent updates to a repository image aren't propagated to already running + // tasks. + // + // - Images in Amazon ECR repositories can be specified by either using the full + // registry/repository:tag or registry/repository@digest . For example, + // 012345678910.dkr.ecr..amazonaws.com/:latest or + // 012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE + // . + // + // - Images in official repositories on Docker Hub use a single name (for + // example, ubuntu or mongo ). + // + // - Images in other repositories on Docker Hub are qualified with an + // organization name (for example, amazon/amazon-ecs-agent ). + // + // - Images in other online repositories are qualified further by a domain name + // (for example, quay.io/assemblyline/ubuntu ). + Image *string + + // When this parameter is true , you can deploy containerized applications that + // require stdin or a tty to be allocated. This parameter maps to OpenStdin in the + // docker container create command and the --interactive option to docker run. + Interactive *bool + + // The links parameter allows containers to communicate with each other without + // the need for port mappings. This parameter is only supported if the network mode + // of a task definition is bridge . The name:internalName construct is analogous + // to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), + // numbers, underscores, and hyphens are allowed.. This parameter maps to Links in + // the docker container create command and the --link option to docker run. + // + // This parameter is not supported for Windows containers. + // + // Containers that are collocated on a single container instance may be able to + // communicate with each other without requiring links or host port mappings. + // Network isolation is achieved on the container instance using security groups + // and VPC settings. + Links []string + + // Linux-specific modifications that are applied to the container, such as Linux + // kernel capabilities. For more information see [KernelCapabilities]. + // + // This parameter is not supported for Windows containers. 
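A small sketch of the image reference forms listed above (illustrative; the Amazon ECR account, Region, and repository are left as angle-bracket placeholders). Each string is passed unchanged to the Image field of a container definition.

package example

var imageReferenceForms = []string{
	"012345678910.dkr.ecr.<region>.amazonaws.com/<repository>:latest", // Amazon ECR, by tag (placeholders)
	"ubuntu",                      // official Docker Hub repository
	"amazon/amazon-ecs-agent",     // Docker Hub organization repository
	"quay.io/assemblyline/ubuntu", // other online registry
}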
+ // + // [KernelCapabilities]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html + LinuxParameters *LinuxParameters + + // The log configuration specification for the container. + // + // This parameter maps to LogConfig in the docker container create command and the + // --log-driver option to docker run. By default, containers use the same logging + // driver that the Docker daemon uses. However the container can use a different + // logging driver than the Docker daemon by specifying a log driver with this + // parameter in the container definition. To use a different logging driver for a + // container, the log system must be configured properly on the container instance + // (or on a different log server for remote logging options). + // + // Amazon ECS currently supports a subset of the logging drivers available to the + // Docker daemon (shown in the [LogConfiguration]data type). Additional log drivers may be available + // in future releases of the Amazon ECS container agent. + // + // This parameter requires version 1.18 of the Docker Remote API or greater on + // your container instance. To check the Docker Remote API version on your + // container instance, log in to your container instance and run the following + // command: sudo docker version --format '{{.Server.APIVersion}}' + // + // The Amazon ECS container agent running on a container instance must register + // the logging drivers available on that instance with the + // ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on + // that instance can use these log configuration options. For more information, see + // [Amazon ECS Container Agent Configuration]in the Amazon Elastic Container Service Developer Guide. + // + // [LogConfiguration]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html + // [Amazon ECS Container Agent Configuration]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html + LogConfiguration *LogConfiguration + + // The amount (in MiB) of memory to present to the container. If your container + // attempts to exceed the memory specified here, the container is killed. The total + // amount of memory reserved for all containers within a task must be lower than + // the task memory value, if one is specified. This parameter maps to Memory in + // the docker container create command and the --memory option to docker run. + // + // If using the Fargate launch type, this parameter is optional. + // + // If using the EC2 launch type, you must specify either a task-level memory value + // or a container-level memory value. If you specify both a container-level memory + // and memoryReservation value, memory must be greater than memoryReservation . If + // you specify memoryReservation , then that value is subtracted from the available + // memory resources for the container instance where the container is placed. + // Otherwise, the value of memory is used. + // + // The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a + // container. So, don't specify less than 6 MiB of memory for your containers. + // + // The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory + // for a container. So, don't specify less than 4 MiB of memory for your + // containers. + Memory *int32 + + // The soft limit (in MiB) of memory to reserve for the container. 
When system + // memory is under heavy contention, Docker attempts to keep the container memory + // to this soft limit. However, your container can consume more memory when it + // needs to, up to either the hard limit specified with the memory parameter (if + // applicable), or all of the available memory on the container instance, whichever + // comes first. This parameter maps to MemoryReservation in the docker container + // create command and the --memory-reservation option to docker run. + // + // If a task-level memory value is not specified, you must specify a non-zero + // integer for one or both of memory or memoryReservation in a container + // definition. If you specify both, memory must be greater than memoryReservation . + // If you specify memoryReservation , then that value is subtracted from the + // available memory resources for the container instance where the container is + // placed. Otherwise, the value of memory is used. + // + // For example, if your container normally uses 128 MiB of memory, but + // occasionally bursts to 256 MiB of memory for short periods of time, you can set + // a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This + // configuration would allow the container to only reserve 128 MiB of memory from + // the remaining resources on the container instance, but also allow the container + // to consume more memory resources when needed. + // + // The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a + // container. So, don't specify less than 6 MiB of memory for your containers. + // + // The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory + // for a container. So, don't specify less than 4 MiB of memory for your + // containers. + MemoryReservation *int32 + + // The mount points for data volumes in your container. + // + // This parameter maps to Volumes in the docker container create command and the + // --volume option to docker run. + // + // Windows containers can mount whole directories on the same drive as + // $env:ProgramData . Windows containers can't mount directories on a different + // drive, and mount point can't be across drives. + MountPoints []MountPoint + + // The name of a container. If you're linking multiple containers together in a + // task definition, the name of one container can be entered in the links of + // another container to connect the containers. Up to 255 letters (uppercase and + // lowercase), numbers, underscores, and hyphens are allowed. This parameter maps + // to name in the docker container create command and the --name option to docker + // run. + Name *string + + // The list of port mappings for the container. Port mappings allow containers to + // access ports on the host container instance to send or receive traffic. + // + // For task definitions that use the awsvpc network mode, only specify the + // containerPort . The hostPort can be left blank or it must be the same value as + // the containerPort . + // + // Port mappings on Windows use the NetNAT gateway address rather than localhost . + // There's no loopback for port mappings on Windows, so you can't access a + // container's mapped port from the host itself. + // + // This parameter maps to PortBindings in the docker container create command + // and the --publish option to docker run. If the network mode of a task + // definition is set to none , then you can't specify port mappings. 
If the network + // mode of a task definition is set to host , then host ports must either be + // undefined or they must match the container port in the port mapping. + // + // After a task reaches the RUNNING status, manual and automatic host and + // container port assignments are visible in the Network Bindings section of a + // container description for a selected task in the Amazon ECS console. The + // assignments are also visible in the networkBindings section of [DescribeTasks] responses. + // + // [DescribeTasks]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTasks.html + PortMappings []PortMapping + + // When this parameter is true, the container is given elevated privileges on the + // host container instance (similar to the root user). This parameter maps to + // Privileged in the docker container create command and the --privileged option + // to docker run. + // + // This parameter is not supported for Windows containers or tasks run on Fargate. + Privileged *bool + + // When this parameter is true , a TTY is allocated. This parameter maps to Tty in + // the docker container create command and the --tty option to docker run. + PseudoTerminal *bool + + // When this parameter is true, the container is given read-only access to its + // root file system. This parameter maps to ReadonlyRootfs in the docker container + // create command and the --read-only option to docker run. + // + // This parameter is not supported for Windows containers. + ReadonlyRootFilesystem *bool + + // The private repository authentication credentials to use. + RepositoryCredentials *RepositoryCredentials + + // The type and amount of a resource to assign to a container. The only supported + // resource is a GPU. + ResourceRequirements []ResourceRequirement + + // The restart policy for a container. When you set up a restart policy, Amazon + // ECS can restart the container without needing to replace the task. For more + // information, see [Restart individual containers in Amazon ECS tasks with container restart policies]in the Amazon Elastic Container Service Developer Guide. + // + // [Restart individual containers in Amazon ECS tasks with container restart policies]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-restart-policy.html + RestartPolicy *ContainerRestartPolicy + + // The secrets to pass to the container. For more information, see [Specifying Sensitive Data] in the Amazon + // Elastic Container Service Developer Guide. + // + // [Specifying Sensitive Data]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html + Secrets []Secret + + // Time duration (in seconds) to wait before giving up on resolving dependencies + // for a container. For example, you specify two containers in a task definition + // with containerA having a dependency on containerB reaching a COMPLETE , SUCCESS + // , or HEALTHY status. If a startTimeout value is specified for containerB and it + // doesn't reach the desired status within that time, then containerA gives up and + // doesn't start. This results in the task transitioning to a STOPPED state. + // + // When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is + // used, it's enforced independently from this start timeout value. + // + // For tasks using the Fargate launch type, the task or service requires the + // following platforms: + // + // - Linux platform version 1.3.0 or later. + // + // - Windows platform version 1.0.0 or later. 
+ // + // For tasks using the EC2 launch type, your container instances require at least + // version 1.26.0 of the container agent to use a container start timeout value. + // However, we recommend using the latest container agent version. For information + // about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent]in + // the Amazon Elastic Container Service Developer Guide. If you're using an Amazon + // ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the + // ecs-init package. If your container instances are launched from version 20190301 + // or later, then they contain the required versions of the container agent and + // ecs-init . For more information, see [Amazon ECS-optimized Linux AMI] in the Amazon Elastic Container Service + // Developer Guide. + // + // The valid values for Fargate are 2-120 seconds. + // + // [Updating the Amazon ECS Container Agent]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html + // [Amazon ECS-optimized Linux AMI]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html + StartTimeout *int32 + + // Time duration (in seconds) to wait before the container is forcefully killed if + // it doesn't exit normally on its own. + // + // For tasks using the Fargate launch type, the task or service requires the + // following platforms: + // + // - Linux platform version 1.3.0 or later. + // + // - Windows platform version 1.0.0 or later. + // + // For tasks that use the Fargate launch type, the max stop timeout value is 120 + // seconds and if the parameter is not specified, the default value of 30 seconds + // is used. + // + // For tasks that use the EC2 launch type, if the stopTimeout parameter isn't + // specified, the value set for the Amazon ECS container agent configuration + // variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the stopTimeout + // parameter or the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable are + // set, then the default values of 30 seconds for Linux containers and 30 seconds + // on Windows containers are used. Your container instances require at least + // version 1.26.0 of the container agent to use a container stop timeout value. + // However, we recommend using the latest container agent version. For information + // about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent]in + // the Amazon Elastic Container Service Developer Guide. If you're using an Amazon + // ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the + // ecs-init package. If your container instances are launched from version 20190301 + // or later, then they contain the required versions of the container agent and + // ecs-init . For more information, see [Amazon ECS-optimized Linux AMI] in the Amazon Elastic Container Service + // Developer Guide. + // + // The valid values for Fargate are 2-120 seconds. + // + // [Updating the Amazon ECS Container Agent]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html + // [Amazon ECS-optimized Linux AMI]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html + StopTimeout *int32 + + // A list of namespaced kernel parameters to set in the container. This parameter + // maps to Sysctls in the docker container create command and the --sysctl option + // to docker run. 
For example, you can configure net.ipv4.tcp_keepalive_time + // setting to maintain longer lived connections. + SystemControls []SystemControl + + // A list of ulimits to set in the container. If a ulimit value is specified in a + // task definition, it overrides the default values set by Docker. This parameter + // maps to Ulimits in the docker container create command and the --ulimit option + // to docker run. Valid naming values are displayed in the [Ulimit]data type. + // + // Amazon ECS tasks hosted on Fargate use the default resource limit values set by + // the operating system with the exception of the nofile resource limit parameter + // which Fargate overrides. The nofile resource limit sets a restriction on the + // number of open files that a container can use. The default nofile soft limit is + // 65535 and the default hard limit is 65535 . + // + // This parameter requires version 1.18 of the Docker Remote API or greater on + // your container instance. To check the Docker Remote API version on your + // container instance, log in to your container instance and run the following + // command: sudo docker version --format '{{.Server.APIVersion}}' + // + // This parameter is not supported for Windows containers. + // + // [Ulimit]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Ulimit.html + Ulimits []Ulimit + + // The user to use inside the container. This parameter maps to User in the docker + // container create command and the --user option to docker run. + // + // When running tasks using the host network mode, don't run containers using the + // root user (UID 0). We recommend using a non-root user for better security. + // + // You can specify the user using the following formats. If specifying a UID or + // GID, you must specify it as a positive integer. + // + // - user + // + // - user:group + // + // - uid + // + // - uid:gid + // + // - user:gid + // + // - uid:group + // + // This parameter is not supported for Windows containers. + User *string + + // Data volumes to mount from another container. This parameter maps to VolumesFrom + // in the docker container create command and the --volumes-from option to docker + // run. + VolumesFrom []VolumeFrom + + // The working directory to run commands inside the container in. This parameter + // maps to WorkingDir in the docker container create command and the --workdir + // option to docker run. + WorkingDirectory *string + + noSmithyDocumentSerde +} + +// The dependencies defined for container startup and shutdown. A container can +// contain multiple dependencies. When a dependency is defined for container +// startup, for container shutdown it is reversed. +// +// Your Amazon ECS container instances require at least version 1.26.0 of the +// container agent to use container dependencies. However, we recommend using the +// latest container agent version. For information about checking your agent +// version and updating to the latest version, see [Updating the Amazon ECS Container Agent]in the Amazon Elastic Container +// Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your +// instance needs at least version 1.26.0-1 of the ecs-init package. If your +// container instances are launched from version 20190301 or later, then they +// contain the required versions of the container agent and ecs-init . For more +// information, see [Amazon ECS-optimized Linux AMI]in the Amazon Elastic Container Service Developer Guide. 
+// +// For tasks that use the Fargate launch type, the task or service requires the +// following platforms: +// +// - Linux platform version 1.3.0 or later. +// +// - Windows platform version 1.0.0 or later. +// +// For more information about how to create a container dependency, see [Container dependency] in the +// Amazon Elastic Container Service Developer Guide. +// +// [Updating the Amazon ECS Container Agent]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html +// [Amazon ECS-optimized Linux AMI]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html +// [Container dependency]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/example_task_definitions.html#example_task_definition-containerdependency +type ContainerDependency struct { + + // The dependency condition of the container. The following are the available + // conditions and their behavior: + // + // - START - This condition emulates the behavior of links and volumes today. It + // validates that a dependent container is started before permitting other + // containers to start. + // + // - COMPLETE - This condition validates that a dependent container runs to + // completion (exits) before permitting other containers to start. This can be + // useful for nonessential containers that run a script and then exit. This + // condition can't be set on an essential container. + // + // - SUCCESS - This condition is the same as COMPLETE , but it also requires that + // the container exits with a zero status. This condition can't be set on an + // essential container. + // + // - HEALTHY - This condition validates that the dependent container passes its + // Docker health check before permitting other containers to start. This requires + // that the dependent container has health checks configured. This condition is + // confirmed only at task startup. + // + // This member is required. + Condition ContainerCondition + + // The name of a container. + // + // This member is required. + ContainerName *string + + noSmithyDocumentSerde +} + +// An Amazon EC2 or External instance that's running the Amazon ECS agent and has +// been registered with a cluster. +type ContainerInstance struct { + + // This parameter returns true if the agent is connected to Amazon ECS. An + // instance with an agent that may be unhealthy or stopped return false . Only + // instances connected to an agent can accept task placement requests. + AgentConnected bool + + // The status of the most recent agent update. If an update wasn't ever requested, + // this value is NULL . + AgentUpdateStatus AgentUpdateStatus + + // The resources attached to a container instance, such as an elastic network + // interface. + Attachments []Attachment + + // The attributes set for the container instance, either by the Amazon ECS + // container agent at instance registration or manually with the [PutAttributes]operation. + // + // [PutAttributes]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAttributes.html + Attributes []Attribute + + // The capacity provider that's associated with the container instance. + CapacityProviderName *string + + // The Amazon Resource Name (ARN) of the container instance. For more information + // about the ARN format, see [Amazon Resource Name (ARN)]in the Amazon ECS Developer Guide. 
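A minimal sketch of a startup dependency using the conditions described above (illustrative only; the container name is a placeholder). The HEALTHY condition requires that the named container has a health check configured.

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

// The depending container waits for the "db" container to pass its Docker
// health check before it is started.
var appDependsOnDB = types.ContainerDependency{
	Condition:     types.ContainerConditionHealthy,
	ContainerName: aws.String("db"),
}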
+ // + // [Amazon Resource Name (ARN)]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + ContainerInstanceArn *string + + // The ID of the container instance. For Amazon EC2 instances, this value is the + // Amazon EC2 instance ID. For external instances, this value is the Amazon Web + // Services Systems Manager managed instance ID. + Ec2InstanceId *string + + // An object representing the health status of the container instance. + HealthStatus *ContainerInstanceHealthStatus + + // The number of tasks on the container instance that are in the PENDING status. + PendingTasksCount int32 + + // The Unix timestamp for the time when the container instance was registered. + RegisteredAt *time.Time + + // For CPU and memory resource types, this parameter describes the amount of each + // resource that was available on the container instance when the container agent + // registered it with Amazon ECS. This value represents the total amount of CPU and + // memory that can be allocated on this container instance to tasks. For port + // resource types, this parameter describes the ports that were reserved by the + // Amazon ECS container agent when it registered the container instance with Amazon + // ECS. + RegisteredResources []Resource + + // For CPU and memory resource types, this parameter describes the remaining CPU + // and memory that wasn't already allocated to tasks and is therefore available for + // new tasks. For port resource types, this parameter describes the ports that were + // reserved by the Amazon ECS container agent (at instance registration time) and + // any task containers that have reserved port mappings on the host (with the host + // or bridge network mode). Any port that's not specified here is available for + // new tasks. + RemainingResources []Resource + + // The number of tasks on the container instance that have a desired status ( + // desiredStatus ) of RUNNING . + RunningTasksCount int32 + + // The status of the container instance. The valid values are REGISTERING , + // REGISTRATION_FAILED , ACTIVE , INACTIVE , DEREGISTERING , or DRAINING . + // + // If your account has opted in to the awsvpcTrunking account setting, then any + // newly registered container instance will transition to a REGISTERING status + // while the trunk elastic network interface is provisioned for the instance. If + // the registration fails, the instance will transition to a REGISTRATION_FAILED + // status. You can describe the container instance and see the reason for failure + // in the statusReason parameter. Once the container instance is terminated, the + // instance transitions to a DEREGISTERING status while the trunk elastic network + // interface is deprovisioned. The instance then transitions to an INACTIVE status. + // + // The ACTIVE status indicates that the container instance can accept tasks. The + // DRAINING indicates that new tasks aren't placed on the container instance and + // any service tasks running on the container instance are removed if possible. For + // more information, see [Container instance draining]in the Amazon Elastic Container Service Developer Guide. + // + // [Container instance draining]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-instance-draining.html + Status *string + + // The reason that the container instance reached its current status. 
+ StatusReason *string + + // The metadata that you apply to the container instance to help you categorize + // and organize them. Each tag consists of a key and an optional value. You define + // both. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []Tag + + // The version counter for the container instance. Every time a container instance + // experiences a change that triggers a CloudWatch event, the version counter is + // incremented. If you're replicating your Amazon ECS container instance state with + // CloudWatch Events, you can compare the version of a container instance reported + // by the Amazon ECS APIs with the version reported in CloudWatch Events for the + // container instance (inside the detail object) to verify that the version in + // your event stream is current. + Version int64 + + // The version information for the Amazon ECS container agent and Docker daemon + // running on the container instance. + VersionInfo *VersionInfo + + noSmithyDocumentSerde +} + +// An object representing the health status of the container instance. +type ContainerInstanceHealthStatus struct { + + // An array of objects representing the details of the container instance health + // status. + Details []InstanceHealthCheckResult + + // The overall health status of the container instance. This is an aggregate + // status of all container instance health checks. + OverallStatus InstanceHealthCheckState + + noSmithyDocumentSerde +} + +// The overrides that are sent to a container. An empty container override can be +// passed in. An example of an empty container override is {"containerOverrides": +// [ ] } . If a non-empty container override is specified, the name parameter must +// be included. +// +// You can use Secrets Manager or Amazon Web Services Systems Manager Parameter +// Store to store the sensitive data. For more information, see [Retrieve secrets through environment variables]in the Amazon ECS +// Developer Guide. +// +// [Retrieve secrets through environment variables]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/secrets-envvar.html +type ContainerOverride struct { + + // The command to send to the container that overrides the default command from + // the Docker image or the task definition. You must also specify a container name. + Command []string + + // The number of cpu units reserved for the container, instead of the default + // value from the task definition. You must also specify a container name. + Cpu *int32 + + // The environment variables to send to the container. 
You can add new environment + // variables, which are added to the container at launch, or you can override the + // existing environment variables from the Docker image or the task definition. You + // must also specify a container name. + Environment []KeyValuePair + + // A list of files containing the environment variables to pass to a container, + // instead of the value from the container definition. + EnvironmentFiles []EnvironmentFile + + // The hard limit (in MiB) of memory to present to the container, instead of the + // default value from the task definition. If your container attempts to exceed the + // memory specified here, the container is killed. You must also specify a + // container name. + Memory *int32 + + // The soft limit (in MiB) of memory to reserve for the container, instead of the + // default value from the task definition. You must also specify a container name. + MemoryReservation *int32 + + // The name of the container that receives the override. This parameter is + // required if any override is specified. + Name *string + + // The type and amount of a resource to assign to a container, instead of the + // default value from the task definition. The only supported resource is a GPU. + ResourceRequirements []ResourceRequirement + + noSmithyDocumentSerde +} + +// You can enable a restart policy for each container defined in your task +// definition, to overcome transient failures faster and maintain task +// availability. When you enable a restart policy for a container, Amazon ECS can +// restart the container if it exits, without needing to replace the task. For more +// information, see [Restart individual containers in Amazon ECS tasks with container restart policies]in the Amazon Elastic Container Service Developer Guide. +// +// [Restart individual containers in Amazon ECS tasks with container restart policies]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-restart-policy.html +type ContainerRestartPolicy struct { + + // Specifies whether a restart policy is enabled for the container. + // + // This member is required. + Enabled *bool + + // A list of exit codes that Amazon ECS will ignore and not attempt a restart on. + // You can specify a maximum of 50 container exit codes. By default, Amazon ECS + // does not ignore any exit codes. + IgnoredExitCodes []int32 + + // A period of time (in seconds) that the container must run for before a restart + // can be attempted. A container can be restarted only once every + // restartAttemptPeriod seconds. If a container isn't able to run for this time + // period and exits early, it will not be restarted. You can set a minimum + // restartAttemptPeriod of 60 seconds and a maximum restartAttemptPeriod of 1800 + // seconds. By default, a container must run for 300 seconds before it can be + // restarted. + RestartAttemptPeriod *int32 + + noSmithyDocumentSerde +} + +// An object that represents a change in state for a container. +type ContainerStateChange struct { + + // The name of the container. + ContainerName *string + + // The exit code for the container, if the state change is a result of the + // container exiting. + ExitCode *int32 + + // The container image SHA 256 digest. + ImageDigest *string + + // Any network bindings that are associated with the container. + NetworkBindings []NetworkBinding + + // The reason for the state change. + Reason *string + + // The ID of the Docker container. + RuntimeId *string + + // The status of the container. 
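As a usage sketch for the ContainerOverride and ContainerRestartPolicy types documented above: the container name, command, and environment values here are placeholders, and the types import path is assumed.

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types" // assumed import path
)

// Restart the container when it exits unexpectedly, ignoring a clean exit (code 0)
// and waiting at least two minutes between restart attempts.
var restartPolicy = types.ContainerRestartPolicy{
	Enabled:              aws.Bool(true),
	IgnoredExitCodes:     []int32{0},
	RestartAttemptPeriod: aws.Int32(120),
}

// Override the command and environment of one container for a single task run.
// Name is required whenever any other override field is set.
var override = types.ContainerOverride{
	Name:    aws.String("web"), // placeholder container name
	Command: []string{"nginx", "-g", "daemon off;"},
	Environment: []types.KeyValuePair{
		{Name: aws.String("STAGE"), Value: aws.String("canary")}, // placeholder variable
	},
}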
+ Status *string + + noSmithyDocumentSerde +} + +// The details of an Amazon ECS service deployment. This is used only when a +// service uses the ECS deployment controller type. +type Deployment struct { + + // The capacity provider strategy that the deployment is using. + CapacityProviderStrategy []CapacityProviderStrategyItem + + // The Unix timestamp for the time when the service deployment was created. + CreatedAt *time.Time + + // The most recent desired count of tasks that was specified for the service to + // deploy or maintain. + DesiredCount int32 + + // The number of consecutively failed tasks in the deployment. A task is + // considered a failure if the service scheduler can't launch the task, the task + // doesn't transition to a RUNNING state, or if it fails any of its defined health + // checks and is stopped. + // + // Once a service deployment has one or more successfully running tasks, the + // failed task count resets to zero and stops being evaluated. + FailedTasks int32 + + // The Fargate ephemeral storage settings for the deployment. + FargateEphemeralStorage *DeploymentEphemeralStorage + + // The ID of the deployment. + Id *string + + // The launch type the tasks in the service are using. For more information, see [Amazon ECS Launch Types] + // in the Amazon Elastic Container Service Developer Guide. + // + // [Amazon ECS Launch Types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + LaunchType LaunchType + + // The VPC subnet and security group configuration for tasks that receive their + // own elastic network interface by using the awsvpc networking mode. + NetworkConfiguration *NetworkConfiguration + + // The number of tasks in the deployment that are in the PENDING status. + PendingCount int32 + + // The operating system that your tasks in the service run on. A + // platform family is specified only for tasks using the Fargate launch type. + // + // All tasks that run as part of this service must use the same platformFamily + // value as the service, for example, LINUX . + PlatformFamily *string + + // The platform version that your tasks in the service run on. A platform version + // is only specified for tasks using the Fargate launch type. If one isn't + // specified, the LATEST platform version is used. For more information, see [Fargate Platform Versions] in + // the Amazon Elastic Container Service Developer Guide. + // + // [Fargate Platform Versions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html + PlatformVersion *string + + // The rolloutState of a service is only returned for services that use the + // rolling update ( ECS ) deployment type that aren't behind a Classic Load + // Balancer. + // + // The rollout state of the deployment. When a service deployment is started, it + // begins in an IN_PROGRESS state. When the service reaches a steady state, the + // deployment transitions to a COMPLETED state. If the service fails to reach a + // steady state and circuit breaker is turned on, the deployment transitions to a + // FAILED state. A deployment in FAILED state doesn't launch any new tasks. For + // more information, see [DeploymentCircuitBreaker]. + // + // [DeploymentCircuitBreaker]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeploymentCircuitBreaker.html + RolloutState DeploymentRolloutState + + // A description of the rollout state of a deployment.
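A small helper, sketched here with an assumed types import path, that condenses the deployment fields above into a one-line progress summary:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types" // assumed import path
)

// rolloutSummary reports where a rolling deployment stands, for example
// "ecs-svc/123: IN_PROGRESS (3/4 running, 0 failed)".
func rolloutSummary(d types.Deployment) string {
	return fmt.Sprintf("%s: %s (%d/%d running, %d failed)",
		aws.ToString(d.Id), d.RolloutState, d.RunningCount, d.DesiredCount, d.FailedTasks)
}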
+ RolloutStateReason *string + + // The number of tasks in the deployment that are in the RUNNING status. + RunningCount int32 + + // The details of the Service Connect configuration that's used by this + // deployment. Compare the configuration between multiple deployments when + // troubleshooting issues with new deployments. + // + // The configuration for this service to discover and connect to services, and be + // discovered by, and connected from, other services within a namespace. + // + // Tasks that run in a namespace can use short names to connect to services in the + // namespace. Tasks can connect to services across all of the clusters in the + // namespace. Tasks connect through a managed proxy container that collects logs + // and metrics for increased visibility. Only the tasks that Amazon ECS services + // create are supported with Service Connect. For more information, see [Service Connect]in the + // Amazon Elastic Container Service Developer Guide. + // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + ServiceConnectConfiguration *ServiceConnectConfiguration + + // The list of Service Connect resources that are associated with this deployment. + // Each list entry maps a discovery name to a Cloud Map service name. + ServiceConnectResources []ServiceConnectServiceResource + + // The status of the deployment. The following describes each state. + // + // PRIMARY The most recent deployment of a service. + // + // ACTIVE A service deployment that still has running tasks, but is in the + // process of being replaced with a new PRIMARY deployment. + // + // INACTIVE A deployment that has been completely replaced. + Status *string + + // The most recent task definition that was specified for the tasks in the service + // to use. + TaskDefinition *string + + // The Unix timestamp for the time when the service deployment was last updated. + UpdatedAt *time.Time + + // The details of the volume that was configuredAtLaunch . You can configure + // different settings like the size, throughput, volumeType, and encryption in [ServiceManagedEBSVolumeConfiguration]. + // The name of the volume must match the name from the task definition. + // + // [ServiceManagedEBSVolumeConfiguration]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ServiceManagedEBSVolumeConfiguration.html + VolumeConfigurations []ServiceVolumeConfiguration + + noSmithyDocumentSerde +} + +// One of the methods that provides a way for you to quickly identify when a +// deployment has failed, and then to optionally roll back to the last +// working deployment. +// +// When the alarms are generated, Amazon ECS sets the service deployment to +// failed. Set the rollback parameter to have Amazon ECS roll back your service +// to the last completed deployment after a failure. +// +// You can only use the DeploymentAlarms method to detect failures when the +// DeploymentController is set to ECS (rolling update). +// +// For more information, see [Rolling update] in the Amazon Elastic Container Service Developer +// Guide. +// +// [Rolling update]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html +type DeploymentAlarms struct { + + // One or more CloudWatch alarm names. Use a "," to separate the alarms. + // + // This member is required. + AlarmNames []string + + // Determines whether to use the CloudWatch alarm option in the service deployment + // process. + // + // This member is required.
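As a sketch of how these alarm settings are typically filled in (the alarm names are placeholders and the types import path is assumed):

package example

import "github.com/aws/aws-sdk-go-v2/service/ecs/types" // assumed import path

// Watch two CloudWatch alarms during the deployment and roll back automatically
// if either of them moves into the ALARM state.
var alarms = types.DeploymentAlarms{
	AlarmNames: []string{"my-svc-5xx-rate", "my-svc-p99-latency"}, // placeholder alarm names
	Enable:     true,
	Rollback:   true,
}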
+ Enable bool + + // Determines whether to configure Amazon ECS to roll back the service if a + // service deployment fails. If rollback is used, when a service deployment fails, + // the service is rolled back to the last deployment that completed successfully. + // + // This member is required. + Rollback bool + + noSmithyDocumentSerde +} + +// The deployment circuit breaker can only be used for services using the rolling +// update ( ECS ) deployment type. +// +// The deployment circuit breaker determines whether a service deployment will +// fail if the service can't reach a steady state. If it is turned on, a service +// deployment will transition to a failed state and stop launching new tasks. You +// can also configure Amazon ECS to roll back your service to the last completed +// deployment after a failure. For more information, see [Rolling update]in the Amazon Elastic +// Container Service Developer Guide. +// +// For more information about API failure reasons, see [API failure reasons] in the Amazon Elastic +// Container Service Developer Guide. +// +// [API failure reasons]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html +// [Rolling update]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html +type DeploymentCircuitBreaker struct { + + // Determines whether to use the deployment circuit breaker logic for the service. + // + // This member is required. + Enable bool + + // Determines whether to configure Amazon ECS to roll back the service if a + // service deployment fails. If rollback is on, when a service deployment fails, + // the service is rolled back to the last deployment that completed successfully. + // + // This member is required. + Rollback bool + + noSmithyDocumentSerde +} + +// Optional deployment parameters that control how many tasks run during a +// deployment and the ordering of stopping and starting tasks. +type DeploymentConfiguration struct { + + // Information about the CloudWatch alarms. + Alarms *DeploymentAlarms + + // The deployment circuit breaker can only be used for services using the rolling + // update ( ECS ) deployment type. + // + // The deployment circuit breaker determines whether a service deployment will + // fail if the service can't reach a steady state. If you use the deployment + // circuit breaker, a service deployment will transition to a failed state and stop + // launching new tasks. If you use the rollback option, when a service deployment + // fails, the service is rolled back to the last deployment that completed + // successfully. For more information, see [Rolling update]in the Amazon Elastic Container Service + // Developer Guide + // + // [Rolling update]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html + DeploymentCircuitBreaker *DeploymentCircuitBreaker + + // If a service is using the rolling update ( ECS ) deployment type, the + // maximumPercent parameter represents an upper limit on the number of your + // service's tasks that are allowed in the RUNNING or PENDING state during a + // deployment, as a percentage of the desiredCount (rounded down to the nearest + // integer). This parameter enables you to define the deployment batch size. 
For + // example, if your service is using the REPLICA service scheduler and has a + // desiredCount of four tasks and a maximumPercent value of 200%, the scheduler + // may start four new tasks before stopping the four older tasks (provided that the + // cluster resources required to do this are available). The default maximumPercent + // value for a service using the REPLICA service scheduler is 200%. + // + // If a service is using either the blue/green ( CODE_DEPLOY ) or EXTERNAL + // deployment types, and tasks in the service use the EC2 launch type, the maximum + // percent value is set to the default value. The maximum percent value is used to + // define the upper limit on the number of the tasks in the service that remain in + // the RUNNING state while the container instances are in the DRAINING state. + // + // You can't specify a custom maximumPercent value for a service that uses either + // the blue/green ( CODE_DEPLOY ) or EXTERNAL deployment types and has tasks that + // use the EC2 launch type. + // + // If the tasks in the service use the Fargate launch type, the maximum percent + // value is not used, although it is returned when describing your service. + MaximumPercent *int32 + + // If a service is using the rolling update ( ECS ) deployment type, the + // minimumHealthyPercent represents a lower limit on the number of your service's + // tasks that must remain in the RUNNING state during a deployment, as a + // percentage of the desiredCount (rounded up to the nearest integer). This + // parameter enables you to deploy without using additional cluster capacity. For + // example, if your service has a desiredCount of four tasks and a + // minimumHealthyPercent of 50%, the service scheduler may stop two existing tasks + // to free up cluster capacity before starting two new tasks. + // + // For services that do not use a load balancer, the following should be noted: + // + // - A service is considered healthy if all essential containers within the + // tasks in the service pass their health checks. + // + // - If a task has no essential containers with a health check defined, the + // service scheduler will wait for 40 seconds after a task reaches a RUNNING + // state before the task is counted towards the minimum healthy percent total. + // + // - If a task has one or more essential containers with a health check defined, + // the service scheduler will wait for the task to reach a healthy status before + // counting it towards the minimum healthy percent total. A task is considered + // healthy when all essential containers within the task have passed their health + // checks. The amount of time the service scheduler can wait for is determined by + // the container health check settings. + // + // For services that do use a load balancer, the following should be noted: + // + // - If a task has no essential containers with a health check defined, the + // service scheduler will wait for the load balancer target group health check to + // return a healthy status before counting the task towards the minimum healthy + // percent total. + // + // - If a task has an essential container with a health check defined, the + // service scheduler will wait for both the task to reach a healthy status and the + // load balancer target group health check to return a healthy status before + // counting the task towards the minimum healthy percent total. + // + // The default value for a replica service for minimumHealthyPercent is 100%. 
The + // default minimumHealthyPercent value for a service using the DAEMON service + // schedule is 0% for the CLI, the Amazon Web Services SDKs, and the APIs and 50% + // for the Amazon Web Services Management Console. + // + // The minimum number of healthy tasks during a deployment is the desiredCount + // multiplied by the minimumHealthyPercent /100, rounded up to the nearest integer + // value. + // + // If a service is using either the blue/green ( CODE_DEPLOY ) or EXTERNAL + // deployment types and is running tasks that use the EC2 launch type, the minimum + // healthy percent value is set to the default value. The minimum healthy percent + // value is used to define the lower limit on the number of the tasks in the + // service that remain in the RUNNING state while the container instances are in + // the DRAINING state. + // + // You can't specify a custom minimumHealthyPercent value for a service that uses + // either the blue/green ( CODE_DEPLOY ) or EXTERNAL deployment types and has + // tasks that use the EC2 launch type. + // + // If a service is using either the blue/green ( CODE_DEPLOY ) or EXTERNAL + // deployment types and is running tasks that use the Fargate launch type, the + // minimum healthy percent value is not used, although it is returned when + // describing your service. + MinimumHealthyPercent *int32 + + noSmithyDocumentSerde +} + +// The deployment controller to use for the service. For more information, see [Amazon ECS deployment types] in +// the Amazon Elastic Container Service Developer Guide. +// +// [Amazon ECS deployment types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html +type DeploymentController struct { + + // The deployment controller type to use. + // + // There are three deployment controller types available: + // + // ECS The rolling update ( ECS ) deployment type involves replacing the current + // running version of the container with the latest version. The number of + // containers Amazon ECS adds or removes from the service during a rolling update + // is controlled by adjusting the minimum and maximum number of healthy tasks + // allowed during a service deployment, as specified in the [DeploymentConfiguration]. + // + // CODE_DEPLOY The blue/green ( CODE_DEPLOY ) deployment type uses the blue/green + // deployment model powered by CodeDeploy, which allows you to verify a new + // deployment of a service before sending production traffic to it. + // + // EXTERNAL The external ( EXTERNAL ) deployment type enables you to use any + // third-party deployment controller for full control over the deployment process + // for an Amazon ECS service. + // + // [DeploymentConfiguration]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeploymentConfiguration.html + // + // This member is required. + Type DeploymentControllerType + + noSmithyDocumentSerde +} + +// The amount of ephemeral storage to allocate for the deployment. +type DeploymentEphemeralStorage struct { + + // Specify an Key Management Service key ID to encrypt the ephemeral storage for + // deployment. + KmsKeyId *string + + noSmithyDocumentSerde +} + +// An object representing a container instance host device. +type Device struct { + + // The path for the device on the host container instance. + // + // This member is required. + HostPath *string + + // The path inside the container at which to expose the host device. + ContainerPath *string + + // The explicit permissions to provide to the container for the device. 
By + // default, the container has permissions for read , write , and mknod for the + // device. + Permissions []DeviceCgroupPermission + + noSmithyDocumentSerde +} + +// This parameter is specified when you're using Docker volumes. Docker volumes +// are only supported when you're using the EC2 launch type. Windows containers +// only support the use of the local driver. To use bind mounts, specify a host +// instead. +type DockerVolumeConfiguration struct { + + // If this value is true , the Docker volume is created if it doesn't already exist. + // + // This field is only used if the scope is shared . + Autoprovision *bool + + // The Docker volume driver to use. The driver value must match the driver name + // provided by Docker because it is used for task placement. If the driver was + // installed using the Docker plugin CLI, use docker plugin ls to retrieve the + // driver name from your container instance. If the driver was installed using + // another method, use Docker plugin discovery to retrieve the driver name. This + // parameter maps to Driver in the docker container create command and the --driver + // option to docker volume create. + Driver *string + + // A map of Docker driver-specific options passed through. This parameter maps to + // DriverOpts in the docker create-volume command and the --opt option to docker + // volume create. + DriverOpts map[string]string + + // Custom metadata to add to your Docker volume. This parameter maps to Labels in + // the docker container create command and the --label option to docker volume + // create. + Labels map[string]string + + // The scope for the Docker volume that determines its lifecycle. Docker volumes + // that are scoped to a task are automatically provisioned when the task starts + // and destroyed when the task stops. Docker volumes that are scoped as shared + // persist after the task stops. + Scope Scope + + noSmithyDocumentSerde +} + +// The tag specifications of an Amazon EBS volume. +type EBSTagSpecification struct { + + // The type of volume resource. + // + // This member is required. + ResourceType EBSResourceType + + // Determines whether to propagate the tags from the task definition to
the + // Amazon EBS volume. Tags can only propagate to a SERVICE specified in 
 + // ServiceVolumeConfiguration . If no value is specified, the tags aren't + // 
propagated. + PropagateTags PropagateTags + + // The tags applied to this Amazon EBS volume. AmazonECSCreated and + // AmazonECSManaged are reserved tags that can't be used. + Tags []Tag + + noSmithyDocumentSerde +} + +// The authorization configuration details for the Amazon EFS file system. +type EFSAuthorizationConfig struct { + + // The Amazon EFS access point ID to use. If an access point is specified, the + // root directory value specified in the EFSVolumeConfiguration must either be + // omitted or set to / which will enforce the path set on the EFS access point. If + // an access point is used, transit encryption must be on in the + // EFSVolumeConfiguration . For more information, see [Working with Amazon EFS access points] in the Amazon Elastic File + // System User Guide. + // + // [Working with Amazon EFS access points]: https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html + AccessPointId *string + + // Determines whether to use the Amazon ECS task role defined in a task definition + // when mounting the Amazon EFS file system. If it is turned on, transit encryption + // must be turned on in the EFSVolumeConfiguration . If this parameter is omitted, + // the default value of DISABLED is used. For more information, see [Using Amazon EFS access points] in the Amazon + // Elastic Container Service Developer Guide. + // + // [Using Amazon EFS access points]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/efs-volumes.html#efs-volume-accesspoints + Iam EFSAuthorizationConfigIAM + + noSmithyDocumentSerde +} + +// This parameter is specified when you're using an Amazon Elastic File System +// file system for task storage. For more information, see [Amazon EFS volumes]in the Amazon Elastic +// Container Service Developer Guide. +// +// [Amazon EFS volumes]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/efs-volumes.html +type EFSVolumeConfiguration struct { + + // The Amazon EFS file system ID to use. + // + // This member is required. + FileSystemId *string + + // The authorization configuration details for the Amazon EFS file system. + AuthorizationConfig *EFSAuthorizationConfig + + // The directory within the Amazon EFS file system to mount as the root directory + // inside the host. If this parameter is omitted, the root of the Amazon EFS volume + // will be used. Specifying / will have the same effect as omitting this parameter. + // + // If an EFS access point is specified in the authorizationConfig , the root + // directory parameter must either be omitted or set to / which will enforce the + // path set on the EFS access point. + RootDirectory *string + + // Determines whether to use encryption for Amazon EFS data in transit between the + // Amazon ECS host and the Amazon EFS server. Transit encryption must be turned on + // if Amazon EFS IAM authorization is used. If this parameter is omitted, the + // default value of DISABLED is used. For more information, see [Encrypting data in transit] in the Amazon + // Elastic File System User Guide. + // + // [Encrypting data in transit]: https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html + TransitEncryption EFSTransitEncryption + + // The port to use when sending encrypted data between the Amazon ECS host and the + // Amazon EFS server. If you do not specify a transit encryption port, it will use + // the port selection strategy that the Amazon EFS mount helper uses. For more + // information, see [EFS mount helper]in the Amazon Elastic File System User Guide. 
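A hedged sketch of wiring the EFSVolumeConfiguration and EFSAuthorizationConfig types above into a task volume with an access point and encryption in transit. The file system and access point IDs are placeholders, and the import path and enum constant names (from the generated enums.go) are assumptions.

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types" // assumed import path
)

// Mount an EFS access point with encryption in transit and IAM authorization.
// When an access point is used, RootDirectory must be omitted or "/".
var efsVolume = types.EFSVolumeConfiguration{
	FileSystemId: aws.String("fs-0123456789abcdef0"), // placeholder file system ID
	AuthorizationConfig: &types.EFSAuthorizationConfig{
		AccessPointId: aws.String("fsap-0123456789abcdef0"), // placeholder access point ID
		Iam:           types.EFSAuthorizationConfigIAMEnabled, // enum constant name assumed
	},
	TransitEncryption: types.EFSTransitEncryptionEnabled, // enum constant name assumed
}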
+ // + // [EFS mount helper]: https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html + TransitEncryptionPort *int32 + + noSmithyDocumentSerde +} + +// A list of files containing the environment variables to pass to a container. +// You can specify up to ten environment files. The file must have a .env file +// extension. Each line in an environment file should contain an environment +// variable in VARIABLE=VALUE format. Lines beginning with # are treated as +// comments and are ignored. +// +// If there are environment variables specified using the environment parameter in +// a container definition, they take precedence over the variables contained within +// an environment file. If multiple environment files are specified that contain +// the same variable, they're processed from the top down. We recommend that you +// use unique variable names. For more information, see [Use a file to pass environment variables to a container]in the Amazon Elastic +// Container Service Developer Guide. +// +// Environment variable files are objects in Amazon S3 and all Amazon S3 security +// considerations apply. +// +// You must use the following platforms for the Fargate launch type: +// +// - Linux platform version 1.4.0 or later. +// +// - Windows platform version 1.0.0 or later. +// +// Consider the following when using the Fargate launch type: +// +// - The file is handled like a native Docker env-file. +// +// - There is no support for shell escape handling. +// +// - The container entry point interprets the VARIABLE values. +// +// [Use a file to pass environment variables to a container]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/use-environment-file.html +type EnvironmentFile struct { + + // The file type to use. Environment files are objects in Amazon S3. The only + // supported value is s3 . + // + // This member is required. + Type EnvironmentFileType + + // The Amazon Resource Name (ARN) of the Amazon S3 object containing the + // environment variable file. + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +// The amount of ephemeral storage to allocate for the task. This parameter is +// used to expand the total amount of ephemeral storage available, beyond the +// default amount, for tasks hosted on Fargate. For more information, see [Using data volumes in tasks]in the +// Amazon ECS Developer Guide. +// +// For tasks using the Fargate launch type, the task requires the following +// platforms: +// +// - Linux platform version 1.4.0 or later. +// +// - Windows platform version 1.0.0 or later. +// +// [Using data volumes in tasks]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html +type EphemeralStorage struct { + + // The total amount, in GiB, of ephemeral storage to set for the task. The minimum + // supported value is 20 GiB and the maximum supported value is 200 GiB. + // + // This member is required. + SizeInGiB int32 + + noSmithyDocumentSerde +} + +// The details of the execute command configuration. +type ExecuteCommandConfiguration struct { + + // Specify a Key Management Service key ID to encrypt the data between the local + // client and the container. + KmsKeyId *string + + // The log configuration for the results of the execute command actions. The logs + // can be sent to CloudWatch Logs or an Amazon S3 bucket. When logging=OVERRIDE is + // specified, a logConfiguration must be provided.
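A sketch of an execute-command configuration that uses the OVERRIDE logging option described here and ships session output to both CloudWatch Logs and S3. The log group, bucket, and prefix are placeholders; the import path and enum constant name are assumptions.

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types" // assumed import path
)

// Send execute-command session logs to an existing CloudWatch log group and an
// existing S3 bucket; OVERRIDE requires LogConfiguration to be set.
var execConfig = types.ExecuteCommandConfiguration{
	Logging: types.ExecuteCommandLoggingOverride, // enum constant name assumed from enums.go
	LogConfiguration: &types.ExecuteCommandLogConfiguration{
		CloudWatchLogGroupName:      aws.String("/ecs/exec-sessions"), // placeholder log group
		CloudWatchEncryptionEnabled: true,
		S3BucketName:                aws.String("my-exec-audit-bucket"), // placeholder bucket
		S3KeyPrefix:                 aws.String("exec"),                 // placeholder prefix
		S3EncryptionEnabled:         true,
	},
}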
+ LogConfiguration *ExecuteCommandLogConfiguration + + // The log setting to use for redirecting logs for your execute command results. + // The following log settings are available. + // + // - NONE : The execute command session is not logged. + // + // - DEFAULT : The awslogs configuration in the task definition is used. If no + // logging parameter is specified, it defaults to this value. If no awslogs log + // driver is configured in the task definition, the output won't be logged. + // + // - OVERRIDE : Specify the logging details as a part of logConfiguration . If + // the OVERRIDE logging option is specified, the logConfiguration is required. + Logging ExecuteCommandLogging + + noSmithyDocumentSerde +} + +// The log configuration for the results of the execute command actions. The logs +// can be sent to CloudWatch Logs or an Amazon S3 bucket. +type ExecuteCommandLogConfiguration struct { + + // Determines whether to use encryption on the CloudWatch logs. If not specified, + // encryption will be off. + CloudWatchEncryptionEnabled bool + + // The name of the CloudWatch log group to send logs to. + // + // The CloudWatch log group must already be created. + CloudWatchLogGroupName *string + + // The name of the S3 bucket to send logs to. + // + // The S3 bucket must already be created. + S3BucketName *string + + // Determines whether to use encryption on the S3 logs. If not specified, + // encryption is not used. + S3EncryptionEnabled bool + + // An optional folder in the S3 bucket to place logs in. + S3KeyPrefix *string + + noSmithyDocumentSerde +} + +// A failed resource. For a list of common causes, see [API failure reasons] in the Amazon Elastic +// Container Service Developer Guide. +// +// [API failure reasons]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html +type Failure struct { + + // The Amazon Resource Name (ARN) of the failed resource. + Arn *string + + // The details of the failure. + Detail *string + + // The reason for the failure. + Reason *string + + noSmithyDocumentSerde +} + +// The FireLens configuration for the container. This is used to specify and +// configure a log router for container logs. For more information, see [Custom log routing]in the +// Amazon Elastic Container Service Developer Guide. +// +// [Custom log routing]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html +type FirelensConfiguration struct { + + // The log router to use. The valid values are fluentd or fluentbit . + // + // This member is required. + Type FirelensConfigurationType + + // The options to use when configuring the log router. This field is optional and + // can be used to specify a custom configuration file or to add additional + // metadata, such as the task, task definition, cluster, and container instance + // details to the log event. If specified, the syntax to use is + // "options":{"enable-ecs-log-metadata":"true|false","config-file-type":"s3|file","config-file-value":"arn:aws:s3:::mybucket/fluent.conf|filepath"} + // . For more information, see [Creating a task definition that uses a FireLens configuration]in the Amazon Elastic Container Service Developer + // Guide. + // + // Tasks hosted on Fargate only support the file configuration file type.
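In Go terms, the syntax string above is simply the Options map on FirelensConfiguration. A hedged sketch pointing the log router at a custom configuration file in S3 (the bucket path comes from the doc text above; the import path and enum constant name are assumptions):

package example

import "github.com/aws/aws-sdk-go-v2/service/ecs/types" // assumed import path

// Route logs through Fluent Bit with ECS metadata enabled and a custom
// configuration file pulled from S3. Fargate tasks only accept the file
// config-file-type, so this particular combination targets EC2-hosted tasks.
var firelens = types.FirelensConfiguration{
	Type: types.FirelensConfigurationTypeFluentbit, // enum constant name assumed
	Options: map[string]string{
		"enable-ecs-log-metadata": "true",
		"config-file-type":        "s3",
		"config-file-value":       "arn:aws:s3:::mybucket/fluent.conf", // placeholder bucket/object
	},
}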
+ // + // [Creating a task definition that uses a FireLens configuration]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html#firelens-taskdef + Options map[string]string + + noSmithyDocumentSerde +} + +// The authorization configuration details for Amazon FSx for Windows File Server +// file system. See [FSxWindowsFileServerVolumeConfiguration]in the Amazon ECS API Reference. +// +// For more information and the input format, see [Amazon FSx for Windows File Server Volumes] in the Amazon Elastic Container +// Service Developer Guide. +// +// [Amazon FSx for Windows File Server Volumes]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/wfsx-volumes.html +// [FSxWindowsFileServerVolumeConfiguration]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_FSxWindowsFileServerVolumeConfiguration.html +type FSxWindowsFileServerAuthorizationConfig struct { + + // The authorization credential option to use. The authorization credential + // options can be provided using either the Amazon Resource Name (ARN) of an + // Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to the + // stored credentials. + // + // This member is required. + CredentialsParameter *string + + // A fully qualified domain name hosted by an [Directory Service] Managed Microsoft AD (Active + // Directory) or self-hosted AD on Amazon EC2. + // + // [Directory Service]: https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html + // + // This member is required. + Domain *string + + noSmithyDocumentSerde +} + +// This parameter is specified when you're using [Amazon FSx for Windows File Server] file system for task storage. +// +// For more information and the input format, see [Amazon FSx for Windows File Server volumes] in the Amazon Elastic Container +// Service Developer Guide. +// +// [Amazon FSx for Windows File Server]: https://docs.aws.amazon.com/fsx/latest/WindowsGuide/what-is.html +// [Amazon FSx for Windows File Server volumes]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/wfsx-volumes.html +type FSxWindowsFileServerVolumeConfiguration struct { + + // The authorization configuration details for the Amazon FSx for Windows File + // Server file system. + // + // This member is required. + AuthorizationConfig *FSxWindowsFileServerAuthorizationConfig + + // The Amazon FSx for Windows File Server file system ID to use. + // + // This member is required. + FileSystemId *string + + // The directory within the Amazon FSx for Windows File Server file system to + // mount as the root directory inside the host. + // + // This member is required. + RootDirectory *string + + noSmithyDocumentSerde +} + +// An object representing a container health check. Health check parameters that +// are specified in a container definition override any Docker health checks that +// exist in the container image (such as those specified in a parent image or from +// the image's Dockerfile). This configuration maps to the HEALTHCHECK parameter +// of docker run. +// +// The Amazon ECS container agent only monitors and reports on the health checks +// specified in the task definition. Amazon ECS does not monitor Docker health +// checks that are embedded in a container image and not specified in the container +// definition. Health check parameters that are specified in a container definition +// override any Docker health checks that exist in the container image. 
+// +// You can view the health status of both individual containers and a task with +// the DescribeTasks API operation or when viewing the task details in the console. +// +// The health check is designed to make sure that your containers survive agent +// restarts, upgrades, or temporary unavailability. +// +// Amazon ECS performs health checks on containers with the default that launched +// the container instance or the task. +// +// The following describes the possible healthStatus values for a container: +// +// - HEALTHY -The container health check has passed successfully. +// +// - UNHEALTHY -The container health check has failed. +// +// - UNKNOWN -The container health check is being evaluated, there's no container +// health check defined, or Amazon ECS doesn't have the health status of the +// container. +// +// The following describes the possible healthStatus values based on the container +// health checker status of essential containers in the task with the following +// priority order (high to low): +// +// - UNHEALTHY -One or more essential containers have failed their health check. +// +// - UNKNOWN -Any essential container running within the task is in an UNKNOWN +// state and no other essential containers have an UNHEALTHY state. +// +// - HEALTHY -All essential containers within the task have passed their health +// checks. +// +// Consider the following task health example with 2 containers. +// +// - If Container1 is UNHEALTHY and Container2 is UNKNOWN , the task health is +// UNHEALTHY . +// +// - If Container1 is UNHEALTHY and Container2 is HEALTHY , the task health is +// UNHEALTHY . +// +// - If Container1 is HEALTHY and Container2 is UNKNOWN , the task health is +// UNKNOWN . +// +// - If Container1 is HEALTHY and Container2 is HEALTHY , the task health is +// HEALTHY . +// +// Consider the following task health example with 3 containers. +// +// - If Container1 is UNHEALTHY and Container2 is UNKNOWN , and Container3 is +// UNKNOWN , the task health is UNHEALTHY . +// +// - If Container1 is UNHEALTHY and Container2 is UNKNOWN , and Container3 is +// HEALTHY , the task health is UNHEALTHY . +// +// - If Container1 is UNHEALTHY and Container2 is HEALTHY , and Container3 is +// HEALTHY , the task health is UNHEALTHY . +// +// - If Container1 is HEALTHY and Container2 is UNKNOWN , and Container3 is +// HEALTHY , the task health is UNKNOWN . +// +// - If Container1 is HEALTHY and Container2 is UNKNOWN , and Container3 is +// UNKNOWN , the task health is UNKNOWN . +// +// - If Container1 is HEALTHY and Container2 is HEALTHY , and Container3 is +// HEALTHY , the task health is HEALTHY . +// +// If a task is run manually, and not as part of a service, the task will continue +// its lifecycle regardless of its health status. For tasks that are part of a +// service, if the task reports as unhealthy then the task will be stopped and the +// service scheduler will replace it. +// +// The following are notes about container health check support: +// +// - If the Amazon ECS container agent becomes disconnected from the Amazon ECS +// service, this won't cause a container to transition to an UNHEALTHY status. +// This is by design, to ensure that containers remain running during agent +// restarts or temporary unavailability. The health check status is the "last heard +// from" response from the Amazon ECS agent, so if the container was considered +// HEALTHY prior to the disconnect, that status will remain until the agent +// reconnects and another health check occurs. 
There are no assumptions made about +// the status of the container health checks. +// +// - Container health checks require version 1.17.0 or greater of the Amazon ECS +// container agent. For more information, see [Updating the Amazon ECS container agent]. +// +// - Container health checks are supported for Fargate tasks if you're using +// platform version 1.1.0 or greater. For more information, see [Fargate platform versions]. +// +// - Container health checks aren't supported for tasks that are part of a +// service that's configured to use a Classic Load Balancer. +// +// [Updating the Amazon ECS container agent]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html +// [Fargate platform versions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html +type HealthCheck struct { + + // A string array representing the command that the container runs to determine if + // it is healthy. The string array must start with CMD to run the command + // arguments directly, or CMD-SHELL to run the command with the container's + // default shell. + // + // When you use the Amazon Web Services Management Console JSON panel, the Command + // Line Interface, or the APIs, enclose the list of commands in double quotes and + // brackets. + // + // [ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ] + // + // You don't include the double quotes and brackets when you use the Amazon Web + // Services Management Console. + // + // CMD-SHELL, curl -f http://localhost/ || exit 1 + // + // An exit code of 0 indicates success, and non-zero exit code indicates failure. + // For more information, see HealthCheck in the docker container create command + // + // This member is required. + Command []string + + // The time period in seconds between each health check execution. You may specify + // between 5 and 300 seconds. The default value is 30 seconds. + Interval *int32 + + // The number of times to retry a failed health check before the container is + // considered unhealthy. You may specify between 1 and 10 retries. The default + // value is 3. + Retries *int32 + + // The optional grace period to provide containers time to bootstrap before failed + // health checks count towards the maximum number of retries. You can specify + // between 0 and 300 seconds. By default, the startPeriod is off. + // + // If a health check succeeds within the startPeriod , then the container is + // considered healthy and any subsequent failures count toward the maximum number + // of retries. + StartPeriod *int32 + + // The time period in seconds to wait for a health check to succeed before it is + // considered a failure. You may specify between 2 and 60 seconds. The default + // value is 5. + Timeout *int32 + + noSmithyDocumentSerde +} + +// Hostnames and IP address entries that are added to the /etc/hosts file of a +// container via the extraHosts parameter of its [ContainerDefinition]. +// +// [ContainerDefinition]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html +type HostEntry struct { + + // The hostname to use in the /etc/hosts entry. + // + // This member is required. + Hostname *string + + // The IP address to use in the /etc/hosts entry. + // + // This member is required. + IpAddress *string + + noSmithyDocumentSerde +} + +// Details on a container instance bind mount host volume. 
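The bracketed CMD-SHELL form shown in the HealthCheck documentation above maps directly onto the Command slice. A sketch of a typical HTTP health check, using values inside the documented ranges (import path assumed):

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types" // assumed import path
)

// Probe the container over HTTP every 30 seconds, allowing 5 seconds per probe,
// 3 retries, and a 60-second grace period while the application boots.
var healthCheck = types.HealthCheck{
	Command:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
	Interval:    aws.Int32(30),
	Timeout:     aws.Int32(5),
	Retries:     aws.Int32(3),
	StartPeriod: aws.Int32(60),
}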
+type HostVolumeProperties struct { + + // When the host parameter is used, specify a sourcePath to declare the path on + // the host container instance that's presented to the container. If this parameter + // is empty, then the Docker daemon has assigned a host path for you. If the host + // parameter contains a sourcePath file location, then the data volume persists at + // the specified location on the host container instance until you delete it + // manually. If the sourcePath value doesn't exist on the host container instance, + // the Docker daemon creates it. If the location does exist, the contents of the + // source path folder are exported. + // + // If you're using the Fargate launch type, the sourcePath parameter is not + // supported. + SourcePath *string + + noSmithyDocumentSerde +} + +// Details on an Elastic Inference accelerator. For more information, see [Working with Amazon Elastic Inference on Amazon ECS] in the +// Amazon Elastic Container Service Developer Guide. +// +// [Working with Amazon Elastic Inference on Amazon ECS]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-inference.html +type InferenceAccelerator struct { + + // The Elastic Inference accelerator device name. The deviceName must also be + // referenced in a container definition as a [ResourceRequirement]. + // + // [ResourceRequirement]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ResourceRequirement.html + // + // This member is required. + DeviceName *string + + // The Elastic Inference accelerator type to use. + // + // This member is required. + DeviceType *string + + noSmithyDocumentSerde +} + +// Details on an Elastic Inference accelerator task override. This parameter is +// used to override the Elastic Inference accelerator specified in the task +// definition. For more information, see [Working with Amazon Elastic Inference on Amazon ECS]in the Amazon Elastic Container Service +// Developer Guide. +// +// [Working with Amazon Elastic Inference on Amazon ECS]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-inference.html +type InferenceAcceleratorOverride struct { + + // The Elastic Inference accelerator device name to override for the task. This + // parameter must match a deviceName specified in the task definition. + DeviceName *string + + // The Elastic Inference accelerator type to use. + DeviceType *string + + noSmithyDocumentSerde +} + +// An object representing the result of a container instance health status check. +type InstanceHealthCheckResult struct { + + // The Unix timestamp for when the container instance health status last changed. + LastStatusChange *time.Time + + // The Unix timestamp for when the container instance health status was last + // updated. + LastUpdated *time.Time + + // The container instance health status. + Status InstanceHealthCheckState + + // The type of container instance health status that was verified. + Type InstanceHealthCheckType + + noSmithyDocumentSerde +} + +// The Linux capabilities to add or remove from the default Docker configuration +// for a container defined in the task definition. For more detailed information +// about these Linux capabilities, see the [capabilities(7)]Linux manual page. +// +// [capabilities(7)]: http://man7.org/linux/man-pages/man7/capabilities.7.html +type KernelCapabilities struct { + + // The Linux capabilities for the container that have been added to the default + // configuration provided by Docker. 
This parameter maps to CapAdd in the docker + // container create command and the --cap-add option to docker run. + // + // Tasks launched on Fargate only support adding the SYS_PTRACE kernel capability. + // + // Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | + // "CHOWN" | "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" + // | "IPC_OWNER" | "KILL" | "LEASE" | "LINUX_IMMUTABLE" | "MAC_ADMIN" | + // "MAC_OVERRIDE" | "MKNOD" | "NET_ADMIN" | "NET_BIND_SERVICE" | "NET_BROADCAST" | + // "NET_RAW" | "SETFCAP" | "SETGID" | "SETPCAP" | "SETUID" | "SYS_ADMIN" | + // "SYS_BOOT" | "SYS_CHROOT" | "SYS_MODULE" | "SYS_NICE" | "SYS_PACCT" | + // "SYS_PTRACE" | "SYS_RAWIO" | "SYS_RESOURCE" | "SYS_TIME" | "SYS_TTY_CONFIG" | + // "SYSLOG" | "WAKE_ALARM" + Add []string + + // The Linux capabilities for the container that have been removed from the + // default configuration provided by Docker. This parameter maps to CapDrop in the + // docker container create command and the --cap-drop option to docker run. + // + // Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | + // "CHOWN" | "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" + // | "IPC_OWNER" | "KILL" | "LEASE" | "LINUX_IMMUTABLE" | "MAC_ADMIN" | + // "MAC_OVERRIDE" | "MKNOD" | "NET_ADMIN" | "NET_BIND_SERVICE" | "NET_BROADCAST" | + // "NET_RAW" | "SETFCAP" | "SETGID" | "SETPCAP" | "SETUID" | "SYS_ADMIN" | + // "SYS_BOOT" | "SYS_CHROOT" | "SYS_MODULE" | "SYS_NICE" | "SYS_PACCT" | + // "SYS_PTRACE" | "SYS_RAWIO" | "SYS_RESOURCE" | "SYS_TIME" | "SYS_TTY_CONFIG" | + // "SYSLOG" | "WAKE_ALARM" + Drop []string + + noSmithyDocumentSerde +} + +// A key-value pair object. +type KeyValuePair struct { + + // The name of the key-value pair. For environment variables, this is the name of + // the environment variable. + Name *string + + // The value of the key-value pair. For environment variables, this is the value + // of the environment variable. + Value *string + + noSmithyDocumentSerde +} + +// The Linux-specific options that are applied to the container, such as Linux [KernelCapabilities]. +// +// [KernelCapabilities]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html +type LinuxParameters struct { + + // The Linux capabilities for the container that are added to or dropped from the + // default configuration provided by Docker. + // + // For tasks that use the Fargate launch type, capabilities is supported for all + // platform versions but the add parameter is only supported if using platform + // version 1.4.0 or later. + Capabilities *KernelCapabilities + + // Any host devices to expose to the container. This parameter maps to Devices in + // the docker container create command and the --device option to docker run. + // + // If you're using tasks that use the Fargate launch type, the devices parameter + // isn't supported. + Devices []Device + + // Run an init process inside the container that forwards signals and reaps + // processes. This parameter maps to the --init option to docker run. This + // parameter requires version 1.25 of the Docker Remote API or greater on your + // container instance. To check the Docker Remote API version on your container + // instance, log in to your container instance and run the following command: sudo + // docker version --format '{{.Server.APIVersion}}' + InitProcessEnabled *bool + + // The total amount of swap memory (in MiB) a container can use. 
This parameter + // will be translated to the --memory-swap option to docker run where the value + // would be the sum of the container memory plus the maxSwap value. + // + // If a maxSwap value of 0 is specified, the container will not use swap. Accepted + // values are 0 or any positive integer. If the maxSwap parameter is omitted, the + // container will use the swap configuration for the container instance it is + // running on. A maxSwap value must be set for the swappiness parameter to be used. + // + // If you're using tasks that use the Fargate launch type, the maxSwap parameter + // isn't supported. + // + // If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't + // supported. + MaxSwap *int32 + + // The value for the size (in MiB) of the /dev/shm volume. This parameter maps to + // the --shm-size option to docker run. + // + // If you are using tasks that use the Fargate launch type, the sharedMemorySize + // parameter is not supported. + SharedMemorySize *int32 + + // This allows you to tune a container's memory swappiness behavior. A swappiness + // value of 0 will cause swapping to not happen unless absolutely necessary. A + // swappiness value of 100 will cause pages to be swapped very aggressively. + // Accepted values are whole numbers between 0 and 100 . If the swappiness + // parameter is not specified, a default value of 60 is used. If a value is not + // specified for maxSwap then this parameter is ignored. This parameter maps to + // the --memory-swappiness option to docker run. + // + // If you're using tasks that use the Fargate launch type, the swappiness + // parameter isn't supported. + // + // If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't + // supported. + Swappiness *int32 + + // The container path, mount options, and size (in MiB) of the tmpfs mount. This + // parameter maps to the --tmpfs option to docker run. + // + // If you're using tasks that use the Fargate launch type, the tmpfs parameter + // isn't supported. + Tmpfs []Tmpfs + + noSmithyDocumentSerde +} + +// The load balancer configuration to use with a service or task set. +// +// When you add, update, or remove a load balancer configuration, Amazon ECS +// starts a new deployment with the updated Elastic Load Balancing configuration. +// This causes tasks to register to and deregister from load balancers. +// +// We recommend that you verify this on a test environment before you update the +// Elastic Load Balancing configuration. +// +// A service-linked role is required for services that use multiple target groups. +// For more information, see [Using service-linked roles]in the Amazon Elastic Container Service Developer +// Guide. +// +// [Using service-linked roles]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html +type LoadBalancer struct { + + // The name of the container (as it appears in a container definition) to + // associate with the load balancer. + // + // You need to specify the container name when configuring the target group for an + // Amazon ECS load balancer. + ContainerName *string + + // The port on the container to associate with the load balancer. This port must + // correspond to a containerPort in the task definition the tasks in the service + // are using. For tasks that use the EC2 launch type, the container instance + // they're launched on must allow ingress traffic on the hostPort of the port + // mapping. 
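A sketch of the LinuxParameters type documented above for an EC2-hosted task: add an init process, disable swap, and drop a kernel capability. The import path is assumed, and the specific values are illustrative only.

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types" // assumed import path
)

// EC2 launch type only: maxSwap, swappiness, devices, and tmpfs aren't
// supported for tasks that use the Fargate launch type.
var linuxParams = types.LinuxParameters{
	InitProcessEnabled: aws.Bool(true),
	MaxSwap:            aws.Int32(0), // 0 disables swap for the container
	SharedMemorySize:   aws.Int32(128),
	Capabilities: &types.KernelCapabilities{
		Drop: []string{"NET_RAW"},
	},
}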
+ ContainerPort *int32 + + // The name of the load balancer to associate with the Amazon ECS service or task + // set. + // + // If you are using an Application Load Balancer or a Network Load Balancer the + // load balancer name parameter should be omitted. + LoadBalancerName *string + + // The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group + // or groups associated with a service or task set. + // + // A target group ARN is only specified when using an Application Load Balancer or + // Network Load Balancer. + // + // For services using the ECS deployment controller, you can specify one or + // multiple target groups. For more information, see [Registering multiple target groups with a service]in the Amazon Elastic + // Container Service Developer Guide. + // + // For services using the CODE_DEPLOY deployment controller, you're required to + // define two target groups for the load balancer. For more information, see [Blue/green deployment with CodeDeploy]in + // the Amazon Elastic Container Service Developer Guide. + // + // If your service's task definition uses the awsvpc network mode, you must choose + // ip as the target type, not instance . Do this when creating your target groups + // because tasks that use the awsvpc network mode are associated with an elastic + // network interface, not an Amazon EC2 instance. This network mode is required for + // the Fargate launch type. + // + // [Registering multiple target groups with a service]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/register-multiple-targetgroups.html + // [Blue/green deployment with CodeDeploy]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-bluegreen.html + TargetGroupArn *string + + noSmithyDocumentSerde +} + +// The log configuration for the container. This parameter maps to LogConfig in +// the docker container create command and the --log-driver option to docker run. +// +// By default, containers use the same logging driver that the Docker daemon uses. +// However, the container might use a different logging driver than the Docker +// daemon by specifying a log driver configuration in the container definition. +// +// Understand the following when specifying a log configuration for your +// containers. +// +// - Amazon ECS currently supports a subset of the logging drivers available to +// the Docker daemon. Additional log drivers may be available in future releases of +// the Amazon ECS container agent. +// +// For tasks on Fargate, the supported log drivers are awslogs , splunk , and +// awsfirelens . +// +// For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs +// , fluentd , gelf , json-file , journald , syslog , splunk , and awsfirelens . +// +// - This parameter requires version 1.18 of the Docker Remote API or greater on +// your container instance. +// +// - For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container +// agent must register the available logging drivers with the +// ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on +// that instance can use these log configuration options. For more information, see +// [Amazon ECS container agent configuration]in the Amazon Elastic Container Service Developer Guide. +// +// - For tasks that are on Fargate, because you don't have access to the +// underlying infrastructure your tasks are hosted on, any additional software +// needed must be installed outside of the task. 
For example, the Fluentd output +// aggregators or a remote host running Logstash to send Gelf logs to. +// +// [Amazon ECS container agent configuration]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html +type LogConfiguration struct { + + // The log driver to use for the container. + // + // For tasks on Fargate, the supported log drivers are awslogs , splunk , and + // awsfirelens . + // + // For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs + // , fluentd , gelf , json-file , journald , syslog , splunk , and awsfirelens . + // + // For more information about using the awslogs log driver, see [Send Amazon ECS logs to CloudWatch] in the Amazon + // Elastic Container Service Developer Guide. + // + // For more information about using the awsfirelens log driver, see [Send Amazon ECS logs to an Amazon Web Services service or Amazon Web Services Partner]. + // + // If you have a custom driver that isn't listed, you can fork the Amazon ECS + // container agent project that's [available on GitHub]and customize it to work with that driver. We + // encourage you to submit pull requests for changes that you would like to have + // included. However, we don't currently provide support for running modified + // copies of this software. + // + // [Send Amazon ECS logs to an Amazon Web Services service or Amazon Web Services Partner]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html + // [Send Amazon ECS logs to CloudWatch]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html + // [available on GitHub]: https://github.com/aws/amazon-ecs-agent + // + // This member is required. + LogDriver LogDriver + + // The configuration options to send to the log driver. + // + // The options you can specify depend on the log driver. Some of the options you + // can specify when you use the awslogs log driver to route logs to Amazon + // CloudWatch include the following: + // + // awslogs-create-group Required: No + // + // Specify whether you want the log group to be created automatically. If this + // option isn't specified, it defaults to false . + // + // Your IAM policy must include the logs:CreateLogGroup permission before you + // attempt to use awslogs-create-group . + // + // awslogs-region Required: Yes + // + // Specify the Amazon Web Services Region that the awslogs log driver is to send + // your Docker logs to. You can choose to send all of your logs from clusters in + // different Regions to a single region in CloudWatch Logs. This is so that they're + // all visible in one location. Otherwise, you can separate them by Region for more + // granularity. Make sure that the specified log group exists in the Region that + // you specify with this option. + // + // awslogs-group Required: Yes + // + // Make sure to specify a log group that the awslogs log driver sends its log + // streams to. + // + // awslogs-stream-prefix Required: Yes, when using the Fargate launch + // type. Optional for the EC2 launch type. + // + // Use the awslogs-stream-prefix option to associate a log stream with the + // specified prefix, the container name, and the ID of the Amazon ECS task that the + // container belongs to. If you specify a prefix with this option, then the log + // stream takes the format prefix-name/container-name/ecs-task-id .
+ // + // If you don't specify a prefix with this option, then the log stream is named + // after the container ID that's assigned by the Docker daemon on the container + // instance. Because it's difficult to trace logs back to the container that sent + // them with just the Docker container ID (which is only available on the container + // instance), we recommend that you specify a prefix with this option. + // + // For Amazon ECS services, you can use the service name as the prefix. Doing so, + // you can trace log streams to the service that the container belongs to, the name + // of the container that sent them, and the ID of the task that the container + // belongs to. + // + // You must specify a stream-prefix for your logs to have your logs appear in the + // Log pane when using the Amazon ECS console. + // + // awslogs-datetime-format Required: No + // + // This option defines a multiline start pattern in Python strftime format. A log + // message consists of a line that matches the pattern and any following lines that + // don’t match the pattern. The matched line is the delimiter between log messages. + // + // One example of a use case for using this format is for parsing output such as a + // stack dump, which might otherwise be logged in multiple entries. The correct + // pattern allows it to be captured in a single entry. + // + // For more information, see [awslogs-datetime-format]. + // + // You cannot configure both the awslogs-datetime-format and + // awslogs-multiline-pattern options. + // + // Multiline logging performs regular expression parsing and matching of all log + // messages. This might have a negative impact on logging performance. + // + // awslogs-multiline-pattern Required: No + // + // This option defines a multiline start pattern that uses a regular expression. A + // log message consists of a line that matches the pattern and any following lines + // that don’t match the pattern. The matched line is the delimiter between log + // messages. + // + // For more information, see [awslogs-multiline-pattern]. + // + // This option is ignored if awslogs-datetime-format is also configured. + // + // You cannot configure both the awslogs-datetime-format and + // awslogs-multiline-pattern options. + // + // Multiline logging performs regular expression parsing and matching of all log + // messages. This might have a negative impact on logging performance. + // + // mode Required: No + // + // Valid values: non-blocking | blocking + // + // This option defines the delivery mode of log messages from the container to + // CloudWatch Logs. The delivery mode you choose affects application availability + // when the flow of logs from container to CloudWatch is interrupted. + // + // If you use the blocking mode and the flow of logs to CloudWatch is interrupted, + // calls from container code to write to the stdout and stderr streams will block. + // The logging thread of the application will block as a result. This may cause the + // application to become unresponsive and lead to container healthcheck failure. + // + // If you use the non-blocking mode, the container's logs are instead stored in an + // in-memory intermediate buffer configured with the max-buffer-size option. This + // prevents the application from becoming unresponsive when logs cannot be sent to + // CloudWatch. We recommend using this mode if you want to ensure service + // availability and are okay with some log loss. 
For more information, see [Preventing log loss with non-blocking mode in the awslogs container log driver]. + // + // max-buffer-size Required: No + // + // Default value: 1m + // + // When non-blocking mode is used, the max-buffer-size log option controls the + // size of the buffer that's used for intermediate message storage. Make sure to + // specify an adequate buffer size based on your application. When the buffer fills + // up, further logs cannot be stored. Logs that cannot be stored are lost. + // + // To route logs using the splunk log router, you need to specify a splunk-token + // and a splunk-url . + // + // When you use the awsfirelens log router to route logs to an Amazon Web Services + // Service or Amazon Web Services Partner Network destination for log storage and + // analytics, you can set the log-driver-buffer-limit option to limit the number + // of events that are buffered in memory, before being sent to the log router + // container. It can help resolve potential log loss issues because high + // throughput might result in memory running out for the buffer inside of Docker. + // + // Other options you can specify when using awsfirelens to route logs depend on + // the destination. When you export logs to Amazon Data Firehose, you can specify + // the Amazon Web Services Region with region and a name for the log stream with + // delivery_stream . + // + // When you export logs to Amazon Kinesis Data Streams, you can specify an Amazon + // Web Services Region with region and a data stream name with stream . + // + // When you export logs to Amazon OpenSearch Service, you can specify options like + // Name , Host (OpenSearch Service endpoint without protocol), Port , Index , Type + // , Aws_auth , Aws_region , Suppress_Type_Name , and tls . + // + // When you export logs to Amazon S3, you can specify the bucket using the bucket + // option. You can also specify region , total_file_size , upload_timeout , and + // use_put_object as options. + // + // This parameter requires version 1.19 of the Docker Remote API or greater on + // your container instance. To check the Docker Remote API version on your + // container instance, log in to your container instance and run the following + // command: sudo docker version --format '{{.Server.APIVersion}}' + // + // [awslogs-multiline-pattern]: https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern + // [awslogs-datetime-format]: https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format + // [Preventing log loss with non-blocking mode in the awslogs container log driver]: http://aws.amazon.com/blogs/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/ + Options map[string]string + + // The secrets to pass to the log configuration. For more information, see [Specifying sensitive data] in the + // Amazon Elastic Container Service Developer Guide. + // + // [Specifying sensitive data]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html + SecretOptions []Secret + + noSmithyDocumentSerde +} + +// Details about the managed agent status for the container. +type ManagedAgent struct { + + // The Unix timestamp for the time when the managed agent was last started. + LastStartedAt *time.Time + + // The last known status of the managed agent. + LastStatus *string + + // The name of the managed agent. When the execute command feature is turned on, + // the managed agent name is ExecuteCommandAgent .
+ Name ManagedAgentName + + // The reason why the managed agent is in its current state. + Reason *string + + noSmithyDocumentSerde +} + +// An object representing a change in state for a managed agent. +type ManagedAgentStateChange struct { + + // The name of the container that's associated with the managed agent. + // + // This member is required. + ContainerName *string + + // The name of the managed agent. + // + // This member is required. + ManagedAgentName ManagedAgentName + + // The status of the managed agent. + // + // This member is required. + Status *string + + // The reason for the status of the managed agent. + Reason *string + + noSmithyDocumentSerde +} + +// The managed scaling settings for the Auto Scaling group capacity provider. +// +// When managed scaling is turned on, Amazon ECS manages the scale-in and +// scale-out actions of the Auto Scaling group. Amazon ECS manages a target +// tracking scaling policy using an Amazon ECS managed CloudWatch metric with the +// specified targetCapacity value as the target value for the metric. For more +// information, see [Using managed scaling]in the Amazon Elastic Container Service Developer Guide. +// +// If managed scaling is off, the user must manage the scaling of the Auto Scaling +// group. +// +// [Using managed scaling]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/asg-capacity-providers.html#asg-capacity-providers-managed-scaling +type ManagedScaling struct { + + // The period of time, in seconds, that a newly launched Amazon EC2 instance waits + // before it can contribute to CloudWatch metrics for the Auto Scaling group. If + // this parameter is omitted, the default value of 300 seconds is used. + InstanceWarmupPeriod *int32 + + // The maximum number of Amazon EC2 instances that Amazon ECS will scale out at + // one time. If this parameter is omitted, the default value of 10000 is used. + MaximumScalingStepSize *int32 + + // The minimum number of Amazon EC2 instances that Amazon ECS will scale out at + // one time. The scale-in process is not affected by this parameter. If this + // parameter is omitted, the default value of 1 is used. + // + // When additional capacity is required, Amazon ECS will scale up by the minimum + // scaling step size even if the actual demand is less than the minimum scaling + // step size. + // + // If you use a capacity provider with an Auto Scaling group configured with more + // than one Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up + // by the exact minimum scaling step size value and will ignore both the maximum + // scaling step size as well as the capacity demand. + MinimumScalingStepSize *int32 + + // Determines whether to use managed scaling for the capacity provider. + Status ManagedScalingStatus + + // The target capacity utilization as a percentage for the capacity provider. The + // specified value must be greater than 0 and less than or equal to 100 . For + // example, if you want the capacity provider to maintain 10% spare capacity, then + // that means the utilization is 90%, so use a targetCapacity of 90 . The default + // value of 100 percent results in the Amazon EC2 instances in your Auto Scaling + // group being completely used. + TargetCapacity *int32 + + noSmithyDocumentSerde +} + +// The managed storage configuration for the cluster. +type ManagedStorageConfiguration struct { + + // Specify the Key Management Service key ID for the Fargate ephemeral storage.
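+ //
+ // A minimal illustrative sketch (the key ID below is a placeholder, not a real
+ // key; aws refers to the SDK's aws helper package):
+ //
+ //	storage := types.ManagedStorageConfiguration{
+ //		FargateEphemeralStorageKmsKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+ //		KmsKeyId:                        aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+ //	}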
+ FargateEphemeralStorageKmsKeyId *string + + // Specify a Key Management Service key ID to encrypt the managed storage. + KmsKeyId *string + + noSmithyDocumentSerde +} + +// The details for a volume mount point that's used in a container definition. +type MountPoint struct { + + // The path on the container to mount the host volume at. + ContainerPath *string + + // If this value is true , the container has read-only access to the volume. If + // this value is false , then the container can write to the volume. The default + // value is false . + ReadOnly *bool + + // The name of the volume to mount. Must be a volume name referenced in the name + // parameter of task definition volume . + SourceVolume *string + + noSmithyDocumentSerde +} + +// Details on the network bindings between a container and its host container +// instance. After a task reaches the RUNNING status, manual and automatic host +// and container port assignments are visible in the networkBindings section of [DescribeTasks] +// API responses. +// +// [DescribeTasks]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTasks.html +type NetworkBinding struct { + + // The IP address that the container is bound to on the container instance. + BindIP *string + + // The port number on the container that's used with the network binding. + ContainerPort *int32 + + // The port number range on the container that's bound to the dynamically mapped + // host port range. + // + // The following rules apply when you specify a containerPortRange : + // + // - You must use either the bridge network mode or the awsvpc network mode. + // + // - This parameter is available for both the EC2 and Fargate launch types. + // + // - This parameter is available for both the Linux and Windows operating + // systems. + // + // - The container instance must have at least version 1.67.0 of the container + // agent and at least version 1.67.0-1 of the ecs-init package + // + // - You can specify a maximum of 100 port ranges per container. + // + // - You do not specify a hostPortRange . The value of the hostPortRange is set + // as follows: + // + // - For containers in a task with the awsvpc network mode, the hostPortRange is + // set to the same value as the containerPortRange . This is a static mapping + // strategy. + // + // - For containers in a task with the bridge network mode, the Amazon ECS agent + // finds open host ports from the default ephemeral range and passes it to docker + // to bind them to the container ports. + // + // - The containerPortRange valid values are between 1 and 65535. + // + // - A port can only be included in one port mapping per container. + // + // - You cannot specify overlapping port ranges. + // + // - The first port in the range must be less than last port in the range. + // + // - Docker recommends that you turn off the docker-proxy in the Docker daemon + // config file when you have a large number of ports. + // + // For more information, see [Issue #11185]on the Github website. + // + // For information about how to turn off the docker-proxy in the Docker daemon + // config file, see [Docker daemon]in the Amazon ECS Developer Guide. + // + // You can call [DescribeTasks]DescribeTasks to view the hostPortRange which are the host ports + // that are bound to the container ports. 
+ // + // [Docker daemon]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/bootstrap_container_instance.html#bootstrap_docker_daemon + // [DescribeTasks]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTasks.html + // [Issue #11185]: https://github.com/moby/moby/issues/11185 + ContainerPortRange *string + + // The port number on the host that's used with the network binding. + HostPort *int32 + + // The port number range on the host that's used with the network binding. This is + // assigned by Docker and delivered by the Amazon ECS agent. + HostPortRange *string + + // The protocol used for the network binding. + Protocol TransportProtocol + + noSmithyDocumentSerde +} + +// The network configuration for a task or service. +type NetworkConfiguration struct { + + // The VPC subnets and security groups that are associated with a task. + // + // All specified subnets and security groups must be from the same VPC. + AwsvpcConfiguration *AwsVpcConfiguration + + noSmithyDocumentSerde +} + +// An object representing the elastic network interface for tasks that use the +// awsvpc network mode. +type NetworkInterface struct { + + // The attachment ID for the network interface. + AttachmentId *string + + // The private IPv6 address for the network interface. + Ipv6Address *string + + // The private IPv4 address for the network interface. + PrivateIpv4Address *string + + noSmithyDocumentSerde +} + +// An object representing a constraint on task placement. For more information, +// see [Task placement constraints]in the Amazon Elastic Container Service Developer Guide. +// +// If you're using the Fargate launch type, task placement constraints aren't +// supported. +// +// [Task placement constraints]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html +type PlacementConstraint struct { + + // A cluster query language expression to apply to the constraint. The expression + // can have a maximum length of 2000 characters. You can't specify an expression if + // the constraint type is distinctInstance . For more information, see [Cluster query language] in the + // Amazon Elastic Container Service Developer Guide. + // + // [Cluster query language]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html + Expression *string + + // The type of constraint. Use distinctInstance to ensure that each task in a + // particular group is running on a different container instance. Use memberOf to + // restrict the selection to a group of valid candidates. + Type PlacementConstraintType + + noSmithyDocumentSerde +} + +// The task placement strategy for a task or service. For more information, see [Task placement strategies] +// in the Amazon Elastic Container Service Developer Guide. +// +// [Task placement strategies]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-strategies.html +type PlacementStrategy struct { + + // The field to apply the placement strategy against. For the spread placement + // strategy, valid values are instanceId (or host , which has the same effect), or + // any platform or custom attribute that's applied to a container instance, such as + // attribute:ecs.availability-zone . For the binpack placement strategy, valid + // values are cpu and memory . For the random placement strategy, this field is + // not used. + Field *string + + // The type of placement strategy.
The random placement strategy randomly places + // tasks on available candidates. The spread placement strategy spreads placement + // across available candidates evenly based on the field parameter. The binpack + // strategy places tasks on available candidates that have the least available + // amount of the resource that's specified with the field parameter. For example, + // if you binpack on memory, a task is placed on the instance with the least amount + // of remaining memory but still enough to run the task. + Type PlacementStrategyType + + noSmithyDocumentSerde +} + +// The devices that are available on the container instance. The only supported +// device type is a GPU. +type PlatformDevice struct { + + // The ID for the GPUs on the container instance. The available GPU IDs can also + // be obtained on the container instance in the + // /var/lib/ecs/gpu/nvidia_gpu_info.json file. + // + // This member is required. + Id *string + + // The type of device that's available on the container instance. The only + // supported value is GPU . + // + // This member is required. + Type PlatformDeviceType + + noSmithyDocumentSerde +} + +// Port mappings allow containers to access ports on the host container instance +// to send or receive traffic. Port mappings are specified as part of the container +// definition. +// +// If you use containers in a task with the awsvpc or host network mode, specify +// the exposed ports using containerPort . The hostPort can be left blank or it +// must be the same value as the containerPort . +// +// Most fields of this parameter ( containerPort , hostPort , protocol ) map to +// PortBindings in the docker container create command and the --publish option to +// docker run . If the network mode of a task definition is set to host , host +// ports must either be undefined or match the container port in the port mapping. +// +// You can't expose the same container port for multiple protocols. If you attempt +// this, an error is returned. +// +// After a task reaches the RUNNING status, manual and automatic host and +// container port assignments are visible in the networkBindings section of [DescribeTasks] API +// responses. +// +// [DescribeTasks]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTasks.html +type PortMapping struct { + + // The application protocol that's used for the port mapping. This parameter only + // applies to Service Connect. We recommend that you set this parameter to be + // consistent with the protocol that your application uses. If you set this + // parameter, Amazon ECS adds protocol-specific connection handling to the Service + // Connect proxy. If you set this parameter, Amazon ECS adds protocol-specific + // telemetry in the Amazon ECS console and CloudWatch. + // + // If you don't set a value for this parameter, then TCP is used. However, Amazon + // ECS doesn't add protocol-specific telemetry for TCP. + // + // appProtocol is immutable in a Service Connect service. Updating this field + // requires a service deletion and redeployment. + // + // Tasks that run in a namespace can use short names to connect to services in the + // namespace. Tasks can connect to services across all of the clusters in the + // namespace. Tasks connect through a managed proxy container that collects logs + // and metrics for increased visibility. Only the tasks that Amazon ECS services + // create are supported with Service Connect.
For more information, see [Service Connect]in the + // Amazon Elastic Container Service Developer Guide. + // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + AppProtocol ApplicationProtocol + + // The port number on the container that's bound to the user-specified or + // automatically assigned host port. + // + // If you use containers in a task with the awsvpc or host network mode, specify + // the exposed ports using containerPort . + // + // If you use containers in a task with the bridge network mode and you specify a + // container port and not a host port, your container automatically receives a host + // port in the ephemeral port range. For more information, see hostPort . Port + // mappings that are automatically assigned in this way do not count toward the 100 + // reserved ports limit of a container instance. + ContainerPort *int32 + + // The port number range on the container that's bound to the dynamically mapped + // host port range. + // + // The following rules apply when you specify a containerPortRange : + // + // - You must use either the bridge network mode or the awsvpc network mode. + // + // - This parameter is available for both the EC2 and Fargate launch types. + // + // - This parameter is available for both the Linux and Windows operating + // systems. + // + // - The container instance must have at least version 1.67.0 of the container + // agent and at least version 1.67.0-1 of the ecs-init package + // + // - You can specify a maximum of 100 port ranges per container. + // + // - You do not specify a hostPortRange . The value of the hostPortRange is set + // as follows: + // + // - For containers in a task with the awsvpc network mode, the hostPortRange is + // set to the same value as the containerPortRange . This is a static mapping + // strategy. + // + // - For containers in a task with the bridge network mode, the Amazon ECS agent + // finds open host ports from the default ephemeral range and passes it to docker + // to bind them to the container ports. + // + // - The containerPortRange valid values are between 1 and 65535. + // + // - A port can only be included in one port mapping per container. + // + // - You cannot specify overlapping port ranges. + // + // - The first port in the range must be less than last port in the range. + // + // - Docker recommends that you turn off the docker-proxy in the Docker daemon + // config file when you have a large number of ports. + // + // For more information, see [Issue #11185]on the Github website. + // + // For information about how to turn off the docker-proxy in the Docker daemon + // config file, see [Docker daemon]in the Amazon ECS Developer Guide. + // + // You can call [DescribeTasks]DescribeTasks to view the hostPortRange which are the host ports + // that are bound to the container ports. + // + // [Docker daemon]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/bootstrap_container_instance.html#bootstrap_docker_daemon + // [DescribeTasks]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTasks.html + // [Issue #11185]: https://github.com/moby/moby/issues/11185 + ContainerPortRange *string + + // The port number on the container instance to reserve for your container. 
+ // + // If you specify a containerPortRange , leave this field empty and the value of + // the hostPort is set as follows: + // + // - For containers in a task with the awsvpc network mode, the hostPort is set + // to the same value as the containerPort . This is a static mapping strategy. + // + // - For containers in a task with the bridge network mode, the Amazon ECS agent + // finds open ports on the host and automatically binds them to the container + // ports. This is a dynamic mapping strategy. + // + // If you use containers in a task with the awsvpc or host network mode, the + // hostPort can either be left blank or set to the same value as the containerPort . + // + // If you use containers in a task with the bridge network mode, you can specify a + // non-reserved host port for your container port mapping, or you can omit the + // hostPort (or set it to 0 ) while specifying a containerPort and your container + // automatically receives a port in the ephemeral port range for your container + // instance operating system and Docker version. + // + // The default ephemeral port range for Docker version 1.6.0 and later is listed + // on the instance under /proc/sys/net/ipv4/ip_local_port_range . If this kernel + // parameter is unavailable, the default ephemeral port range from 49153 through + // 65535 (Linux) or 49152 through 65535 (Windows) is used. Do not attempt to + // specify a host port in the ephemeral port range as these are reserved for + // automatic assignment. In general, ports below 32768 are outside of the ephemeral + // port range. + // + // The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and + // the Amazon ECS container agent ports 51678-51680. Any host port that was + // previously specified in a running task is also reserved while the task is + // running. That is, after a task stops, the host port is released. The current + // reserved ports are displayed in the remainingResources of [DescribeContainerInstances] output. A container + // instance can have up to 100 reserved ports at a time. This number includes the + // default reserved ports. Automatically assigned ports aren't included in the 100 + // reserved ports quota. + // + // [DescribeContainerInstances]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeContainerInstances.html + HostPort *int32 + + // The name that's used for the port mapping. This parameter only applies to + // Service Connect. This parameter is the name that you use in the + // serviceConnectConfiguration of a service. The name can include up to 64 + // characters. The characters can include lowercase letters, numbers, underscores + // (_), and hyphens (-). The name can't start with a hyphen. + // + // For more information, see [Service Connect] in the Amazon Elastic Container Service Developer + // Guide. + // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + Name *string + + // The protocol used for the port mapping. Valid values are tcp and udp . The + // default is tcp . protocol is immutable in a Service Connect service. Updating + // this field requires a service deletion and redeployment. + Protocol TransportProtocol + + noSmithyDocumentSerde +} + +// An object representing the protection status details for a task. You can set +// the protection status with the [UpdateTaskProtection]API and get the status of tasks with the [GetTaskProtection] API. 
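+//
+// As an illustrative sketch only, a caller might inspect the protection state
+// returned by GetTaskProtection like this (out and its ProtectedTasks field are
+// assumed here to be the output of that call; aws refers to the SDK's aws
+// helper package):
+//
+//	for _, pt := range out.ProtectedTasks {
+//		if pt.ProtectionEnabled {
+//			fmt.Printf("%s is protected until %v\n", aws.ToString(pt.TaskArn), pt.ExpirationDate)
+//		}
+//	}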
+// +// [UpdateTaskProtection]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateTaskProtection.html +// [GetTaskProtection]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_GetTaskProtection.html +type ProtectedTask struct { + + // The epoch time when protection for the task will expire. + ExpirationDate *time.Time + + // The protection status of the task. If scale-in protection is on for a task, the + // value is true . Otherwise, it is false . + ProtectionEnabled bool + + // The task ARN. + TaskArn *string + + noSmithyDocumentSerde +} + +// The configuration details for the App Mesh proxy. +// +// For tasks that use the EC2 launch type, the container instances require at +// least version 1.26.0 of the container agent and at least version 1.26.0-1 of the +// ecs-init package to use a proxy configuration. If your container instances are +// launched from the Amazon ECS optimized AMI version 20190301 or later, then they +// contain the required versions of the container agent and ecs-init . For more +// information, see [Amazon ECS-optimized Linux AMI] +// +// [Amazon ECS-optimized Linux AMI]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html +type ProxyConfiguration struct { + + // The name of the container that will serve as the App Mesh proxy. + // + // This member is required. + ContainerName *string + + // The set of network configuration parameters to provide the Container Network + // Interface (CNI) plugin, specified as key-value pairs. + // + // - IgnoredUID - (Required) The user ID (UID) of the proxy container as defined + // by the user parameter in a container definition. This is used to ensure the + // proxy ignores its own traffic. If IgnoredGID is specified, this field can be + // empty. + // + // - IgnoredGID - (Required) The group ID (GID) of the proxy container as defined + // by the user parameter in a container definition. This is used to ensure the + // proxy ignores its own traffic. If IgnoredUID is specified, this field can be + // empty. + // + // - AppPorts - (Required) The list of ports that the application uses. Network + // traffic to these ports is forwarded to the ProxyIngressPort and + // ProxyEgressPort . + // + // - ProxyIngressPort - (Required) Specifies the port that incoming traffic to + // the AppPorts is directed to. + // + // - ProxyEgressPort - (Required) Specifies the port that outgoing traffic from + // the AppPorts is directed to. + // + // - EgressIgnoredPorts - (Required) The egress traffic going to the specified + // ports is ignored and not redirected to the ProxyEgressPort . It can be an + // empty list. + // + // - EgressIgnoredIPs - (Required) The egress traffic going to the specified IP + // addresses is ignored and not redirected to the ProxyEgressPort . It can be an + // empty list. + Properties []KeyValuePair + + // The proxy type. The only supported value is APPMESH . + Type ProxyConfigurationType + + noSmithyDocumentSerde +} + +// The repository credentials for private registry authentication. +type RepositoryCredentials struct { + + // The Amazon Resource Name (ARN) of the secret containing the private repository + // credentials. + // + // When you use the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret + // exists in the same Region as the task that you're launching then you can use + // either the full ARN or the name of the secret. When you use the Amazon Web + // Services Management Console, you must specify the full ARN of the secret. 
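+ //
+ // A minimal illustrative sketch (the secret ARN below is a placeholder; aws
+ // refers to the SDK's aws helper package):
+ //
+ //	creds := types.RepositoryCredentials{
+ //		CredentialsParameter: aws.String("arn:aws:secretsmanager:us-east-1:111122223333:secret:example"),
+ //	}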
+ // + // This member is required. + CredentialsParameter *string + + noSmithyDocumentSerde +} + +// Describes the resources available for a container instance. +type Resource struct { + + // When the doubleValue type is set, the value of the resource must be a double + // precision floating-point type. + DoubleValue float64 + + // When the integerValue type is set, the value of the resource must be an integer. + IntegerValue int32 + + // When the longValue type is set, the value of the resource must be an extended + // precision floating-point type. + LongValue int64 + + // The name of the resource, such as CPU , MEMORY , PORTS , PORTS_UDP , or a + // user-defined resource. + Name *string + + // When the stringSetValue type is set, the value of the resource must be a string + // type. + StringSetValue []string + + // The type of the resource. Valid values: INTEGER , DOUBLE , LONG , or STRINGSET . + Type *string + + noSmithyDocumentSerde +} + +// The type and amount of a resource to assign to a container. The supported +// resource types are GPUs and Elastic Inference accelerators. For more +// information, see [Working with GPUs on Amazon ECS]or [Working with Amazon Elastic Inference on Amazon ECS] in the Amazon Elastic Container Service Developer Guide +// +// [Working with Amazon Elastic Inference on Amazon ECS]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-inference.html +// [Working with GPUs on Amazon ECS]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-gpu.html +type ResourceRequirement struct { + + // The type of resource to assign to a container. + // + // This member is required. + Type ResourceType + + // The value for the specified resource type. + // + // When the type is GPU , the value is the number of physical GPUs the Amazon ECS + // container agent reserves for the container. The number of GPUs that's reserved + // for all containers in a task can't exceed the number of available GPUs on the + // container instance that the task is launched on. + // + // When the type is InferenceAccelerator , the value matches the deviceName for an [InferenceAccelerator] + // specified in a task definition. + // + // [InferenceAccelerator]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_InferenceAccelerator.html + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +// Information about the platform for the Amazon ECS service or task. +// +// For more information about RuntimePlatform , see [RuntimePlatform] in the Amazon Elastic +// Container Service Developer Guide. +// +// [RuntimePlatform]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform +type RuntimePlatform struct { + + // The CPU architecture. + // + // You can run your Linux tasks on an ARM-based platform by setting the value to + // ARM64 . This option is available for tasks that run on Linux Amazon EC2 instance + // or Linux containers on Fargate. + CpuArchitecture CPUArchitecture + + // The operating system. + OperatingSystemFamily OSFamily + + noSmithyDocumentSerde +} + +// A floating-point percentage of the desired number of tasks to place and keep +// running in the task set. +type Scale struct { + + // The unit of measure for the scale value. + Unit ScaleUnit + + // The value, specified as a percent total of a service's desiredCount , to scale + // the task set. Accepted values are numbers between 0 and 100. 
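+ //
+ // For example, an illustrative sketch that scales a task set to half of the
+ // service's desiredCount (ScaleUnitPercent is the PERCENT value from this
+ // package's ScaleUnit enum):
+ //
+ //	scale := types.Scale{
+ //		Unit:  types.ScaleUnitPercent,
+ //		Value: 50,
+ //	}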
+ Value float64 + + noSmithyDocumentSerde +} + +// An object representing the secret to expose to your container. Secrets can be +// exposed to a container in the following ways: +// +// - To inject sensitive data into your containers as environment variables, use +// the secrets container definition parameter. +// +// - To reference sensitive information in the log configuration of a container, +// use the secretOptions container definition parameter. +// +// For more information, see [Specifying sensitive data] in the Amazon Elastic Container Service Developer +// Guide. +// +// [Specifying sensitive data]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html +type Secret struct { + + // The name of the secret. + // + // This member is required. + Name *string + + // The secret to expose to the container. The supported values are either the full + // ARN of the Secrets Manager secret or the full ARN of the parameter in the SSM + // Parameter Store. + // + // For information about the required Identity and Access Management permissions, + // see [Required IAM permissions for Amazon ECS secrets](for Secrets Manager) or [Required IAM permissions for Amazon ECS secrets] (for Systems Manager Parameter Store) in the + // Amazon Elastic Container Service Developer Guide. + // + // If the SSM Parameter Store parameter exists in the same Region as the task + // you're launching, then you can use either the full ARN or name of the parameter. + // If the parameter exists in a different Region, then the full ARN must be + // specified. + // + // [Required IAM permissions for Amazon ECS secrets]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data-parameters.html + // + // This member is required. + ValueFrom *string + + noSmithyDocumentSerde +} + +// Details on a service within a cluster. +type Service struct { + + // The capacity provider strategy the service uses. When using the + // DescribeServices API, this field is omitted if the service was created using a + // launch type. + CapacityProviderStrategy []CapacityProviderStrategyItem + + // The Amazon Resource Name (ARN) of the cluster that hosts the service. + ClusterArn *string + + // The Unix timestamp for the time when the service was created. + CreatedAt *time.Time + + // The principal that created the service. + CreatedBy *string + + // Optional deployment parameters that control how many tasks run during the + // deployment and the ordering of stopping and starting tasks. + DeploymentConfiguration *DeploymentConfiguration + + // The deployment controller type the service is using. + DeploymentController *DeploymentController + + // The current state of deployments for the service. + Deployments []Deployment + + // The desired number of instantiations of the task definition to keep running on + // the service. This value is specified when the service is created with [CreateService], and it + // can be modified with [UpdateService]. + // + // [CreateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html + // [UpdateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateService.html + DesiredCount int32 + + // Determines whether to use Amazon ECS managed tags for the tasks in the service. + // For more information, see [Tagging Your Amazon ECS Resources]in the Amazon Elastic Container Service Developer + // Guide.
+ // + // [Tagging Your Amazon ECS Resources]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html + EnableECSManagedTags bool + + // Determines whether the execute command functionality is turned on for the + // service. If true , the execute command functionality is turned on for all + // containers in tasks as part of the service. + EnableExecuteCommand bool + + // The event stream for your service. A maximum of 100 of the latest events are + // displayed. + Events []ServiceEvent + + // The period of time, in seconds, that the Amazon ECS service scheduler ignores + // unhealthy Elastic Load Balancing target health checks after a task has first + // started. + HealthCheckGracePeriodSeconds *int32 + + // The launch type the service is using. When using the DescribeServices API, this + // field is omitted if the service was created using a capacity provider strategy. + LaunchType LaunchType + + // A list of Elastic Load Balancing load balancer objects. It contains the load + // balancer name, the container name, and the container port to access from the + // load balancer. The container name is as it appears in a container definition. + LoadBalancers []LoadBalancer + + // The VPC subnet and security group configuration for tasks that receive their + // own elastic network interface by using the awsvpc networking mode. + NetworkConfiguration *NetworkConfiguration + + // The number of tasks in the cluster that are in the PENDING state. + PendingCount int32 + + // The placement constraints for the tasks in the service. + PlacementConstraints []PlacementConstraint + + // The placement strategy that determines how tasks for the service are placed. + PlacementStrategy []PlacementStrategy + + // The operating system that your tasks in the service run on. A platform family + // is specified only for tasks using the Fargate launch type. + // + // All tasks that run as part of this service must use the same platformFamily + // value as the service (for example, LINUX ). + PlatformFamily *string + + // The platform version to run your service on. A platform version is only + // specified for tasks that are hosted on Fargate. If one isn't specified, the + // LATEST platform version is used. For more information, see [Fargate Platform Versions] in the Amazon + // Elastic Container Service Developer Guide. + // + // [Fargate Platform Versions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html + PlatformVersion *string + + // Determines whether to propagate the tags from the task definition or the + // service to the task. If no value is specified, the tags aren't propagated. + PropagateTags PropagateTags + + // The ARN of the IAM role that's associated with the service. It allows the + // Amazon ECS container agent to register container instances with an Elastic Load + // Balancing load balancer. + RoleArn *string + + // The number of tasks in the cluster that are in the RUNNING state. + RunningCount int32 + + // The scheduling strategy to use for the service. For more information, see [Services]. + // + // There are two service scheduler strategies available. + // + // - REPLICA -The replica scheduling strategy places and maintains the desired + // number of tasks across your cluster. By default, the service scheduler spreads + // tasks across Availability Zones. You can use task placement strategies and + // constraints to customize task placement decisions. 
+ // + // - DAEMON -The daemon scheduling strategy deploys exactly one task on each + // active container instance. This task meets all of the task placement constraints + // that you specify in your cluster. The service scheduler also evaluates the task + // placement constraints for running tasks. It stops tasks that don't meet the + // placement constraints. + // + // Fargate tasks don't support the DAEMON scheduling strategy. + // + // [Services]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html + SchedulingStrategy SchedulingStrategy + + // The ARN that identifies the service. For more information about the ARN format, + // see [Amazon Resource Name (ARN)]in the Amazon ECS Developer Guide. + // + // [Amazon Resource Name (ARN)]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + ServiceArn *string + + // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, + // underscores, and hyphens are allowed. Service names must be unique within a + // cluster. However, you can have similarly named services in multiple clusters + // within a Region or across multiple Regions. + ServiceName *string + + // The details for the service discovery registries to assign to this service. For + // more information, see [Service Discovery]. + // + // [Service Discovery]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html + ServiceRegistries []ServiceRegistry + + // The status of the service. The valid values are ACTIVE , DRAINING , or INACTIVE . + Status *string + + // The metadata that you apply to the service to help you categorize and organize + // it. Each tag consists of a key and an optional value. You define both the key + // and value. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []Tag + + // The task definition to use for tasks in the service. This value is specified + // when the service is created with [CreateService], and it can be modified with [UpdateService]. + // + // [CreateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html + // [UpdateService]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateService.html + TaskDefinition *string + + // Information about a set of Amazon ECS tasks in either a CodeDeploy or an + // EXTERNAL deployment. An Amazon ECS task set includes details such as the desired + // number of tasks, how many tasks are running, and whether the task set serves + // production traffic.
+ TaskSets []TaskSet + + noSmithyDocumentSerde +} + +// Each alias ("endpoint") is a fully-qualified name and port number that other +// tasks ("clients") can use to connect to this service. +// +// Each name and port mapping must be unique within the namespace. +// +// Tasks that run in a namespace can use short names to connect to services in the +// namespace. Tasks can connect to services across all of the clusters in the +// namespace. Tasks connect through a managed proxy container that collects logs +// and metrics for increased visibility. Only the tasks that Amazon ECS services +// create are supported with Service Connect. For more information, see [Service Connect]in the +// Amazon Elastic Container Service Developer Guide. +// +// [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html +type ServiceConnectClientAlias struct { + + // The listening port number for the Service Connect proxy. This port is available + // inside of all of the tasks within the same namespace. + // + // To avoid changing your applications in client Amazon ECS services, set this to + // the same port that the client application uses by default. For more information, + // see [Service Connect]in the Amazon Elastic Container Service Developer Guide. + // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + // + // This member is required. + Port *int32 + + // The dnsName is the name that you use in the applications of client tasks to + // connect to this service. The name must be a valid DNS name but doesn't need to + // be fully-qualified. The name can include up to 127 characters. The name can + // include lowercase letters, numbers, underscores (_), hyphens (-), and periods + // (.). The name can't start with a hyphen. + // + // If this parameter isn't specified, the default value of discoveryName.namespace + // is used. If the discoveryName isn't specified, the port mapping name from the + // task definition is used in portName.namespace . + // + // To avoid changing your applications in client Amazon ECS services, set this to + // the same name that the client application uses by default. For example, a few + // common names are database , db , or the lowercase name of a database, such as + // mysql or redis . For more information, see [Service Connect] in the Amazon Elastic Container + // Service Developer Guide. + // + // [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html + DnsName *string + + noSmithyDocumentSerde +} + +// The Service Connect configuration of your Amazon ECS service. The configuration +// for this service to discover and connect to services, and be discovered by, and +// connected from, other services within a namespace. +// +// Tasks that run in a namespace can use short names to connect to services in the +// namespace. Tasks can connect to services across all of the clusters in the +// namespace. Tasks connect through a managed proxy container that collects logs +// and metrics for increased visibility. Only the tasks that Amazon ECS services +// create are supported with Service Connect. For more information, see [Service Connect]in the +// Amazon Elastic Container Service Developer Guide. +// +// [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html +type ServiceConnectConfiguration struct { + + // Specifies whether to use Service Connect with this service. 
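+ //
+ // As an illustrative sketch only (the namespace, port name, alias, and port
+ // below are placeholder values; aws refers to the SDK's aws helper package):
+ //
+ //	scc := types.ServiceConnectConfiguration{
+ //		Enabled:   true,
+ //		Namespace: aws.String("internal"),
+ //		Services: []types.ServiceConnectService{{
+ //			PortName: aws.String("api"),
+ //			ClientAliases: []types.ServiceConnectClientAlias{{
+ //				Port:    aws.Int32(8080),
+ //				DnsName: aws.String("api.internal"),
+ //			}},
+ //		}},
+ //	}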
+ // + // This member is required. + Enabled bool + + // The log configuration for the container. This parameter maps to LogConfig in + // the docker container create command and the --log-driver option to docker run. + // + // By default, containers use the same logging driver that the Docker daemon uses. + // However, the container might use a different logging driver than the Docker + // daemon by specifying a log driver configuration in the container definition. + // + // Understand the following when specifying a log configuration for your + // containers. + // + // - Amazon ECS currently supports a subset of the logging drivers available to + // the Docker daemon. Additional log drivers may be available in future releases of + // the Amazon ECS container agent. + // + // For tasks on Fargate, the supported log drivers are awslogs , splunk , and + // awsfirelens . + // + // For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs + // , fluentd , gelf , json-file , journald , syslog , splunk , and awsfirelens . + // + // - This parameter requires version 1.18 of the Docker Remote API or greater on + // your container instance. + // + // - For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container + // agent must register the available logging drivers with the + // ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on + // that instance can use these log configuration options. For more information, see + // [Amazon ECS container agent configuration]in the Amazon Elastic Container Service Developer Guide. + // + // - For tasks that are on Fargate, because you don't have access to the + // underlying infrastructure your tasks are hosted on, any additional software + // needed must be installed outside of the task. For example, the Fluentd output + // aggregators or a remote host running Logstash to send Gelf logs to. + // + // [Amazon ECS container agent configuration]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html + LogConfiguration *LogConfiguration + + // The namespace name or full Amazon Resource Name (ARN) of the Cloud Map + // namespace for use with Service Connect. The namespace must be in the same Amazon + // Web Services Region as the Amazon ECS service and cluster. The type of namespace + // doesn't affect Service Connect. For more information about Cloud Map, see [Working with Services]in + // the Cloud Map Developer Guide. + // + // [Working with Services]: https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html + Namespace *string + + // The list of Service Connect service objects. These are names and aliases (also + // known as endpoints) that are used by other Amazon ECS services to connect to + // this service. + // + // This field is not required for a "client" Amazon ECS service that's a member of + // a namespace only to connect to other services within the namespace. An example + // of this would be a frontend application that accepts incoming requests from + // either a load balancer that's attached to the service or by other means. + // + // An object selects a port from the task definition, assigns a name for the Cloud + // Map service, and a list of aliases (endpoints) and ports for client applications + // to refer to this service. + Services []ServiceConnectService + + noSmithyDocumentSerde +} + +// The Service Connect service object configuration. 
For more information, see [Service Connect] in +// the Amazon Elastic Container Service Developer Guide. +// +// [Service Connect]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html +type ServiceConnectService struct { + + // The portName must match the name of one of the portMappings from all the + // containers in the task definition of this Amazon ECS service. + // + // This member is required. + PortName *string + + // The list of client aliases for this Service Connect service. You use these to + // assign names that can be used by client applications. The maximum number of + // client aliases that you can have in this list is 1. + // + // Each alias ("endpoint") is a fully-qualified name and port number that other + // Amazon ECS tasks ("clients") can use to connect to this service. + // + // Each name and port mapping must be unique within the namespace. + // + // For each ServiceConnectService , you must provide at least one clientAlias with + // one port . + ClientAliases []ServiceConnectClientAlias + + // The discoveryName is the name of the new Cloud Map service that Amazon ECS + // creates for this Amazon ECS service. This must be unique within the Cloud Map + // namespace. The name can contain up to 64 characters. The name can include + // lowercase letters, numbers, underscores (_), and hyphens (-). The name can't + // start with a hyphen. + // + // If the discoveryName isn't specified, the port mapping name from the task + // definition is used in portName.namespace . + DiscoveryName *string + + // The port number for the Service Connect proxy to listen on. + // + // Use the value of this field to bypass the proxy for traffic on the port number + // specified in the named portMapping in the task definition of this application, + // and then use it in your VPC security groups to allow traffic into the proxy for + // this Amazon ECS service. + // + // In awsvpc mode and Fargate, the default value is the container port number. The + // container port number is in the portMapping in the task definition. In bridge + // mode, the default value is the ephemeral port of the Service Connect proxy. + IngressPortOverride *int32 + + // A reference to an object that represents the configured timeouts for Service + // Connect. + Timeout *TimeoutConfiguration + + // A reference to an object that represents a Transport Layer Security (TLS) + // configuration. + Tls *ServiceConnectTlsConfiguration + + noSmithyDocumentSerde +} + +// The Service Connect resource. Each configuration maps a discovery name to a +// Cloud Map service name. The data is stored in Cloud Map as part of the Service +// Connect configuration for each discovery name of this Amazon ECS service. +// +// A task can resolve the dnsName for each of the clientAliases of a service. +// However a task can't resolve the discovery names. If you want to connect to a +// service, refer to the ServiceConnectConfiguration of that service for the list +// of clientAliases that you can use. +type ServiceConnectServiceResource struct { + + // The Amazon Resource Name (ARN) for the namespace in Cloud Map that matches the + // discovery name for this Service Connect resource. You can use this ARN in other + // integrations with Cloud Map. However, Service Connect can't ensure connectivity + // outside of Amazon ECS. + DiscoveryArn *string + + // The discovery name of this Service Connect resource. 
+ // + // The discoveryName is the name of the new Cloud Map service that Amazon ECS + // creates for this Amazon ECS service. This must be unique within the Cloud Map + // namespace. The name can contain up to 64 characters. The name can include + // lowercase letters, numbers, underscores (_), and hyphens (-). The name can't + // start with a hyphen. + // + // If the discoveryName isn't specified, the port mapping name from the task + // definition is used in portName.namespace . + DiscoveryName *string + + noSmithyDocumentSerde +} + +// The certificate root authority that secures your service. +type ServiceConnectTlsCertificateAuthority struct { + + // The ARN of the Amazon Web Services Private Certificate Authority certificate. + AwsPcaAuthorityArn *string + + noSmithyDocumentSerde +} + +// The key that encrypts and decrypts your resources for Service Connect TLS. +type ServiceConnectTlsConfiguration struct { + + // The signer certificate authority. + // + // This member is required. + IssuerCertificateAuthority *ServiceConnectTlsCertificateAuthority + + // The Amazon Web Services Key Management Service key. + KmsKey *string + + // The Amazon Resource Name (ARN) of the IAM role that's associated with the + // Service Connect TLS. + RoleArn *string + + noSmithyDocumentSerde +} + +// The details for an event that's associated with a service. +type ServiceEvent struct { + + // The Unix timestamp for the time when the event was triggered. + CreatedAt *time.Time + + // The ID string for the event. + Id *string + + // The event message. + Message *string + + noSmithyDocumentSerde +} + +// The configuration for the Amazon EBS volume that Amazon ECS creates and manages +// on your behalf. These settings are used to create each Amazon EBS volume, with +// one volume created for each task in the service. +// +// Many of these parameters map 1:1 with the Amazon EBS CreateVolume API request +// parameters. +type ServiceManagedEBSVolumeConfiguration struct { + + // The ARN of the IAM role to associate with this volume. This is the Amazon ECS + // infrastructure IAM role that is used to manage your Amazon Web Services + // infrastructure. We recommend using the Amazon ECS-managed + // AmazonECSInfrastructureRolePolicyForVolumes IAM policy with this role. For more + // information, see [Amazon ECS infrastructure IAM role]in the Amazon ECS Developer Guide. + // + // [Amazon ECS infrastructure IAM role]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/infrastructure_IAM_role.html + // + // This member is required. + RoleArn *string + + // Indicates whether the volume should be encrypted. If no value is specified, + // encryption is turned on by default. This parameter maps 1:1 with the Encrypted + // parameter of the [CreateVolume API]in the Amazon EC2 API Reference. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + Encrypted *bool + + // The Linux filesystem type for the volume. For volumes created from a snapshot, + // you must specify the same filesystem type that the volume was using when the + // snapshot was created. If there is a filesystem type mismatch, the task will fail + // to start. + // + // The available filesystem types are
 ext3 , ext4 , and xfs . If no value is + // specified, the xfs filesystem type is used by default. + FilesystemType TaskFilesystemType + + // The number of I/O operations per second (IOPS). For gp3 , io1 , and io2 + // volumes, this represents the number of IOPS that are provisioned for the volume. + // For gp2 volumes, this represents the baseline performance of the volume and the + // rate at which the volume accumulates I/O credits for bursting. + // + // The following are the supported values for each volume type. + // + // - gp3 : 3,000 - 16,000 IOPS + // + // - io1 : 100 - 64,000 IOPS + // + // - io2 : 100 - 256,000 IOPS + // + // This parameter is required for io1 and io2 volume types. The default for gp3 + // volumes is 3,000 IOPS . This parameter is not supported for st1 , sc1 , or + // standard volume types. + // + // This parameter maps 1:1 with the Iops parameter of the [CreateVolume API] in the Amazon EC2 API + // Reference. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + Iops *int32 + + // The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key + // Management Service key to use for Amazon EBS encryption. When encryption is + // turned on and no Amazon Web Services Key Management Service key is specified, + // the default Amazon Web Services managed key for Amazon EBS volumes is used. This + // parameter maps 1:1 with the KmsKeyId parameter of the [CreateVolume API] in the Amazon EC2 API + // Reference. + // + // Amazon Web Services authenticates the Amazon Web Services Key Management + // Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that + // is invalid, the action can appear to complete, but eventually fails. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + KmsKeyId *string + + // The size of the volume in GiB. You must specify either a volume size or a + // snapshot ID. If you specify a snapshot ID, the snapshot size is used for the + // volume size by default. You can optionally specify a volume size greater than or + // equal to the snapshot size. This parameter maps 1:1 with the Size parameter of + // the [CreateVolume API]in the Amazon EC2 API Reference. + // + // The following are the supported volume size values for each volume type. + // + // - gp2 and gp3 : 1-16,384 + // + // - io1 and io2 : 4-16,384 + // + // - st1 and sc1 : 125-16,384 + // + // - standard : 1-1,024 + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + SizeInGiB *int32 + + // The snapshot that Amazon ECS uses to create the volume. You must specify either + // a snapshot ID or a volume size. This parameter maps 1:1 with the SnapshotId + // parameter of the [CreateVolume API]in the Amazon EC2 API Reference. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + SnapshotId *string + + // The tags to apply to the volume. Amazon ECS applies service-managed tags by + // default. This parameter maps 1:1 with the TagSpecifications.N parameter of the [CreateVolume API] + // in the Amazon EC2 API Reference. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + TagSpecifications []EBSTagSpecification + + // The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 + // MiB/s. 
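+ //
+ // As a hedged sketch only (not part of the generated documentation), a gp3
+ // volume that provisions extra IOPS and throughput might be configured as
+ // follows; the role ARN, size, and rates are placeholder values, and the
+ // usual "types" and aws helper imports are assumed:
+ //
+ //	ebs := types.ServiceManagedEBSVolumeConfiguration{
+ //		RoleArn:    aws.String("arn:aws:iam::111122223333:role/ecsInfrastructureRole"),
+ //		VolumeType: aws.String("gp3"),
+ //		SizeInGiB:  aws.Int32(100),
+ //		Iops:       aws.Int32(4000),
+ //		Throughput: aws.Int32(250),
+ //	}
+ //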
This parameter maps 1:1 with the Throughput parameter of the [CreateVolume API] in the + // Amazon EC2 API Reference. + // + // This parameter is only supported for the gp3 volume type. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + Throughput *int32 + + // The volume type. This parameter maps 1:1 with the VolumeType parameter of the [CreateVolume API] + // in the Amazon EC2 API Reference. For more information, see [Amazon EBS volume types]in the Amazon EC2 + // User Guide. + // + // The following are the supported volume types. + // + // - General Purpose SSD: gp2 | gp3 + // + // - Provisioned IOPS SSD: io1 | io2 + // + // - Throughput Optimized HDD: st1 + // + // - Cold HDD: sc1 + // + // - Magnetic: standard + // + // The magnetic volume type is not supported on Fargate. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + // [Amazon EBS volume types]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html + VolumeType *string + + noSmithyDocumentSerde +} + +// The details for the service registry. +// +// Each service may be associated with one service registry. Multiple service +// registries for each service are not supported. +// +// When you add, update, or remove the service registries configuration, Amazon +// ECS starts a new deployment. New tasks are registered and deregistered to the +// updated service registry configuration. +type ServiceRegistry struct { + + // The container name value to be used for your service discovery service. It's + // already specified in the task definition. If the task definition that your + // service task specifies uses the bridge or host network mode, you must specify a + // containerName and containerPort combination from the task definition. If the + // task definition that your service task specifies uses the awsvpc network mode + // and a type SRV DNS record is used, you must specify either a containerName and + // containerPort combination or a port value. However, you can't specify both. + ContainerName *string + + // The port value to be used for your service discovery service. It's already + // specified in the task definition. If the task definition your service task + // specifies uses the bridge or host network mode, you must specify a containerName + // and containerPort combination from the task definition. If the task definition + // your service task specifies uses the awsvpc network mode and a type SRV DNS + // record is used, you must specify either a containerName and containerPort + // combination or a port value. However, you can't specify both. + ContainerPort *int32 + + // The port value used if your service discovery service specified an SRV record. + // This field might be used if both the awsvpc network mode and SRV records are + // used. + Port *int32 + + // The Amazon Resource Name (ARN) of the service registry. The currently supported + // service registry is Cloud Map. For more information, see [CreateService]. + // + // [CreateService]: https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateService.html + RegistryArn *string + + noSmithyDocumentSerde +} + +// The configuration for a volume specified in the task definition as a volume +// that is configured at launch time. Currently, the only supported volume type is +// an Amazon EBS volume. +type ServiceVolumeConfiguration struct { + + // The name of the volume. 
This value must match the volume name from the Volume + // object in the task definition. + // + // This member is required. + Name *string + + // The configuration for the Amazon EBS volume that Amazon ECS creates and manages + // on your behalf. These settings are used to create each Amazon EBS volume, with + // one volume created for each task in the service. The Amazon EBS volumes are + // visible in your account in the Amazon EC2 console once they are created. + ManagedEBSVolume *ServiceManagedEBSVolumeConfiguration + + noSmithyDocumentSerde +} + +// The details for the execute command session. +type Session struct { + + // The ID of the execute command session. + SessionId *string + + // A URL to the managed agent on the container that the SSM Session Manager client + // uses to send commands and receive output from the container. + StreamUrl *string + + // An encrypted token value containing session and caller information. It's used + // to authenticate the connection to the container. + TokenValue *string + + noSmithyDocumentSerde +} + +// The current account setting for a resource. +type Setting struct { + + // The Amazon ECS resource name. + Name SettingName + + // The ARN of the principal. It can be a user, role, or the root user. If this + // field is omitted, the authenticated user is assumed. + PrincipalArn *string + + // Indicates whether Amazon Web Services manages the account setting, or if the + // user manages it. + // + // aws_managed account settings are read-only, as Amazon Web Services manages such + // on the customer's behalf. Currently, the guardDutyActivate account setting is + // the only one Amazon Web Services manages. + Type SettingType + + // Determines whether the account setting is on or off for the specified resource. + Value *string + + noSmithyDocumentSerde +} + +// A list of namespaced kernel parameters to set in the container. This parameter +// maps to Sysctls in the docker container create command and the --sysctl option +// to docker run. For example, you can configure net.ipv4.tcp_keepalive_time +// setting to maintain longer lived connections. +// +// We don't recommend that you specify network-related systemControls parameters +// for multiple containers in a single task that also uses either the awsvpc or +// host network mode. Doing this has the following disadvantages: +// +// - For tasks that use the awsvpc network mode including Fargate, if you set +// systemControls for any container, it applies to all containers in the task. If +// you set different systemControls for multiple containers in a single task, the +// container that's started last determines which systemControls take effect. +// +// - For tasks that use the host network mode, the network namespace +// systemControls aren't supported. +// +// If you're setting an IPC resource namespace to use for the containers in the +// task, the following conditions apply to your system controls. For more +// information, see [IPC mode]. +// +// - For tasks that use the host IPC mode, IPC namespace systemControls aren't +// supported. +// +// - For tasks that use the task IPC mode, IPC namespace systemControls values +// apply to all containers within a task. +// +// This parameter is not supported for Windows containers. +// +// This parameter is only supported for tasks that are hosted on Fargate if the +// tasks are using platform version 1.4.0 or later (Linux). This isn't supported +// for Windows containers on Fargate. 
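+//
+// For illustration only (not part of the generated documentation), and assuming
+// the usual "types" and aws helper imports, the keepalive setting mentioned
+// above could be expressed as follows; the value of 300 seconds is a placeholder:
+//
+//	sc := types.SystemControl{
+//		Namespace: aws.String("net.ipv4.tcp_keepalive_time"),
+//		Value:     aws.String("300"),
+//	}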
+// +// [IPC mode]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#task_definition_ipcmode +type SystemControl struct { + + // The namespaced kernel parameter to set a value for. + Namespace *string + + // The namespaced kernel parameter to set a value for. + // + // Valid IPC namespace values: "kernel.msgmax" | "kernel.msgmnb" | "kernel.msgmni" + // | "kernel.sem" | "kernel.shmall" | "kernel.shmmax" | "kernel.shmmni" | + // "kernel.shm_rmid_forced" , and Sysctls that start with "fs.mqueue.*" + // + // Valid network namespace values: Sysctls that start with "net.*" + // + // All of these values are supported by Fargate. + Value *string + + noSmithyDocumentSerde +} + +// The metadata that you apply to a resource to help you categorize and organize +// them. Each tag consists of a key and an optional value. You define them. +// +// The following basic restrictions apply to tags: +// +// - Maximum number of tags per resource - 50 +// +// - For each resource, each tag key must be unique, and each tag key can have +// only one value. +// +// - Maximum key length - 128 Unicode characters in UTF-8 +// +// - Maximum value length - 256 Unicode characters in UTF-8 +// +// - If your tagging schema is used across multiple services and resources, +// remember that other services may have restrictions on allowed characters. +// Generally allowed characters are: letters, numbers, and spaces representable in +// UTF-8, and the following characters: + - = . _ : / @. +// +// - Tag keys and values are case-sensitive. +// +// - Do not use aws: , AWS: , or any upper or lowercase combination of such as a +// prefix for either keys or values as it is reserved for Amazon Web Services use. +// You cannot edit or delete tag keys or values with this prefix. Tags with this +// prefix do not count against your tags per resource limit. +type Tag struct { + + // One part of a key-value pair that make up a tag. A key is a general label that + // acts like a category for more specific tag values. + Key *string + + // The optional part of a key-value pair that make up a tag. A value acts as a + // descriptor within a tag category (key). + Value *string + + noSmithyDocumentSerde +} + +// Details on a task in a cluster. +type Task struct { + + // The Elastic Network Adapter that's associated with the task if the task uses + // the awsvpc network mode. + Attachments []Attachment + + // The attributes of the task + Attributes []Attribute + + // The Availability Zone for the task. + AvailabilityZone *string + + // The capacity provider that's associated with the task. + CapacityProviderName *string + + // The ARN of the cluster that hosts the task. + ClusterArn *string + + // The connectivity status of a task. + Connectivity Connectivity + + // The Unix timestamp for the time when the task last went into CONNECTED status. + ConnectivityAt *time.Time + + // The ARN of the container instances that host the task. + ContainerInstanceArn *string + + // The containers that's associated with the task. + Containers []Container + + // The number of CPU units used by the task as expressed in a task definition. It + // can be expressed as an integer using CPU units (for example, 1024 ). It can also + // be expressed as a string using vCPUs (for example, 1 vCPU or 1 vcpu ). String + // values are converted to an integer that indicates the CPU units when the task + // definition is registered. + // + // If you use the EC2 launch type, this field is optional. 
Supported values are + // between 128 CPU units ( 0.125 vCPUs) and 10240 CPU units ( 10 vCPUs). + // + // If you use the Fargate launch type, this field is required. You must use one of + // the following values. These values determine the range of supported values for + // the memory parameter: + // + // The CPU units cannot be less than 1 vCPU when you use Windows containers on + // Fargate. + // + // - 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 + // GB) + // + // - 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 + // GB), 4096 (4 GB) + // + // - 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 + // GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) + // + // - 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in + // increments of 1024 (1 GB) + // + // - 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in + // increments of 1024 (1 GB) + // + // - 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments + // + // This option requires Linux platform 1.4.0 or later. + // + // - 16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments + // + // This option requires Linux platform 1.4.0 or later. + Cpu *string + + // The Unix timestamp for the time when the task was created. More specifically, + // it's for the time when the task entered the PENDING state. + CreatedAt *time.Time + + // The desired status of the task. For more information, see [Task Lifecycle]. + // + // [Task Lifecycle]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-lifecycle.html + DesiredStatus *string + + // Determines whether execute command functionality is turned on for this task. If + // true , execute command functionality is turned on all the containers in the task. + EnableExecuteCommand bool + + // The ephemeral storage settings for the task. + EphemeralStorage *EphemeralStorage + + // The Unix timestamp for the time when the task execution stopped. + ExecutionStoppedAt *time.Time + + // The Fargate ephemeral storage settings for the task. + FargateEphemeralStorage *TaskEphemeralStorage + + // The name of the task group that's associated with the task. + Group *string + + // The health status for the task. It's determined by the health of the essential + // containers in the task. If all essential containers in the task are reporting as + // HEALTHY , the task status also reports as HEALTHY . If any essential containers + // in the task are reporting as UNHEALTHY or UNKNOWN , the task status also reports + // as UNHEALTHY or UNKNOWN . + // + // The Amazon ECS container agent doesn't monitor or report on Docker health + // checks that are embedded in a container image and not specified in the container + // definition. For example, this includes those specified in a parent image or from + // the image's Dockerfile. Health check parameters that are specified in a + // container definition override any Docker health checks that are found in the + // container image. + HealthStatus HealthStatus + + // The Elastic Inference accelerator that's associated with the task. + InferenceAccelerators []InferenceAccelerator + + // The last known status for the task. For more information, see [Task Lifecycle]. + // + // [Task Lifecycle]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-lifecycle.html + LastStatus *string + + // The infrastructure where your task runs on. 
For more information, see [Amazon ECS launch types] in the + // Amazon Elastic Container Service Developer Guide. + // + // [Amazon ECS launch types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + LaunchType LaunchType + + // The amount of memory (in MiB) that the task uses as expressed in a task + // definition. It can be expressed as an integer using MiB (for example, 1024 ). If + // it's expressed as a string using GB (for example, 1GB or 1 GB ), it's converted + // to an integer indicating the MiB when the task definition is registered. + // + // If you use the EC2 launch type, this field is optional. + // + // If you use the Fargate launch type, this field is required. You must use one of + // the following values. The value that you choose determines the range of + // supported values for the cpu parameter. + // + // - 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) + // + // - 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: + // 512 (.5 vCPU) + // + // - 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 + // GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) + // + // - Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - + // Available cpu values: 2048 (2 vCPU) + // + // - Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - + // Available cpu values: 4096 (4 vCPU) + // + // - Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 + // vCPU) + // + // This option requires Linux platform 1.4.0 or later. + // + // - Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 + // vCPU) + // + // This option requires Linux platform 1.4.0 or later. + Memory *string + + // One or more container overrides. + Overrides *TaskOverride + + // The operating system that your tasks are running on. A platform family is + // specified only for tasks that use the Fargate launch type. + // + // All tasks that run as part of this service must use the same platformFamily + // value as the service (for example, LINUX. ). + PlatformFamily *string + + // The platform version where your task runs on. A platform version is only + // specified for tasks that use the Fargate launch type. If you didn't specify one, + // the LATEST platform version is used. For more information, see [Fargate Platform Versions] in the Amazon + // Elastic Container Service Developer Guide. + // + // [Fargate Platform Versions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html + PlatformVersion *string + + // The Unix timestamp for the time when the container image pull began. + PullStartedAt *time.Time + + // The Unix timestamp for the time when the container image pull completed. + PullStoppedAt *time.Time + + // The Unix timestamp for the time when the task started. More specifically, it's + // for the time when the task transitioned from the PENDING state to the RUNNING + // state. + StartedAt *time.Time + + // The tag specified when a task is started. If an Amazon ECS service started the + // task, the startedBy parameter contains the deployment ID of that service. + StartedBy *string + + // The stop code indicating why a task was stopped. The stoppedReason might + // contain additional details. + // + // For more information about stop code, see [Stopped tasks error codes] in the Amazon ECS Developer Guide. 
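+ //
+ // As an illustrative sketch only (not generated documentation), given a Task
+ // value named task (for example, from a DescribeTasks response) and assuming
+ // the standard fmt package plus aws.ToString from the SDK's aws helper package,
+ // the stop details could be inspected as:
+ //
+ //	if task.StopCode != "" {
+ //		fmt.Printf("stop code %s: %s\n", task.StopCode, aws.ToString(task.StoppedReason))
+ //	}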
+ // + // [Stopped tasks error codes]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/stopped-task-error-codes.html + StopCode TaskStopCode + + // The Unix timestamp for the time when the task was stopped. More specifically, + // it's for the time when the task transitioned from the RUNNING state to the + // STOPPED state. + StoppedAt *time.Time + + // The reason that the task was stopped. + StoppedReason *string + + // The Unix timestamp for the time when the task stops. More specifically, it's + // for the time when the task transitions from the RUNNING state to STOPPING . + StoppingAt *time.Time + + // The metadata that you apply to the task to help you categorize and organize the + // task. Each tag consists of a key and an optional value. You define both the key + // and value. + // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []Tag + + // The Amazon Resource Name (ARN) of the task. + TaskArn *string + + // The ARN of the task definition that creates the task. + TaskDefinitionArn *string + + // The version counter for the task. Every time a task experiences a change that + // starts a CloudWatch event, the version counter is incremented. If you replicate + // your Amazon ECS task state with CloudWatch Events, you can compare the version + // of a task reported by the Amazon ECS API actions with the version reported in + // CloudWatch Events for the task (inside the detail object) to verify that the + // version in your event stream is current. + Version int64 + + noSmithyDocumentSerde +} + +// The details of a task definition which describes the container and volume +// definitions of an Amazon Elastic Container Service task. You can specify which +// Docker images to use, the required resources, and other configurations related +// to launching the task definition through an Amazon ECS service or task. +type TaskDefinition struct { + + // Amazon ECS validates the task definition parameters with those supported by the + // launch type. For more information, see [Amazon ECS launch types]in the Amazon Elastic Container Service + // Developer Guide. + // + // [Amazon ECS launch types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + Compatibilities []Compatibility + + // A list of container definitions in JSON format that describe the different + // containers that make up your task. For more information about container + // definition parameters and defaults, see [Amazon ECS Task Definitions]in the Amazon Elastic Container Service + // Developer Guide. 
+ // + // [Amazon ECS Task Definitions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html + ContainerDefinitions []ContainerDefinition + + // The number of cpu units used by the task. If you use the EC2 launch type, this + // field is optional. Any value can be used. If you use the Fargate launch type, + // this field is required. You must use one of the following values. The value that + // you choose determines your range of valid values for the memory parameter. + // + // If you use the EC2 launch type, this field is optional. Supported values are + // between 128 CPU units ( 0.125 vCPUs) and 10240 CPU units ( 10 vCPUs). + // + // The CPU units cannot be less than 1 vCPU when you use Windows containers on + // Fargate. + // + // - 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 + // GB) + // + // - 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 + // GB), 4096 (4 GB) + // + // - 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 + // GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) + // + // - 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in + // increments of 1024 (1 GB) + // + // - 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in + // increments of 1024 (1 GB) + // + // - 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments + // + // This option requires Linux platform 1.4.0 or later. + // + // - 16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments + // + // This option requires Linux platform 1.4.0 or later. + Cpu *string + + // The Unix timestamp for the time when the task definition was deregistered. + DeregisteredAt *time.Time + + // The ephemeral storage settings to use for tasks run with the task definition. + EphemeralStorage *EphemeralStorage + + // The Amazon Resource Name (ARN) of the task execution role that grants the + // Amazon ECS container agent permission to make Amazon Web Services API calls on + // your behalf. For informationabout the required IAM roles for Amazon ECS, see [IAM roles for Amazon ECS]in + // the Amazon Elastic Container Service Developer Guide. + // + // [IAM roles for Amazon ECS]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html + ExecutionRoleArn *string + + // The name of a family that this task definition is registered to. Up to 255 + // characters are allowed. Letters (both uppercase and lowercase letters), numbers, + // hyphens (-), and underscores (_) are allowed. + // + // A family groups multiple versions of a task definition. Amazon ECS gives the + // first task definition that you registered to a family a revision number of 1. + // Amazon ECS gives sequential revision numbers to each task definition that you + // add. + Family *string + + // The Elastic Inference accelerator that's associated with the task. + InferenceAccelerators []InferenceAccelerator + + // The IPC resource namespace to use for the containers in the task. The valid + // values are host , task , or none . If host is specified, then all containers + // within the tasks that specified the host IPC mode on the same container + // instance share the same IPC resources with the host Amazon EC2 instance. If task + // is specified, all containers within the specified task share the same IPC + // resources. 
If none is specified, then IPC resources within the containers of a + // task are private and not shared with other containers in a task or on the + // container instance. If no value is specified, then the IPC resource namespace + // sharing depends on the Docker daemon setting on the container instance. + // + // If the host IPC mode is used, be aware that there is a heightened risk of + // undesired IPC namespace exposure. + // + // If you are setting namespaced kernel parameters using systemControls for the + // containers in the task, the following will apply to your IPC resource namespace. + // For more information, see [System Controls]in the Amazon Elastic Container Service Developer + // Guide. + // + // - For tasks that use the host IPC mode, IPC namespace related systemControls + // are not supported. + // + // - For tasks that use the task IPC mode, IPC namespace related systemControls + // will apply to all containers within a task. + // + // This parameter is not supported for Windows containers or tasks run on Fargate. + // + // [System Controls]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html + IpcMode IpcMode + + // The amount (in MiB) of memory used by the task. + // + // If your tasks run on Amazon EC2 instances, you must specify either a + // task-level memory value or a container-level memory value. This field is + // optional and any value can be used. If a task-level memory value is specified, + // the container-level memory value is optional. For more information regarding + // container-level memory and memory reservation, see [ContainerDefinition]. + // + // If your tasks run on Fargate, this field is required. You must use one of the + // following values. The value you choose determines your range of valid values for + // the cpu parameter. + // + // - 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) + // + // - 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: + // 512 (.5 vCPU) + // + // - 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 + // GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) + // + // - Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - + // Available cpu values: 2048 (2 vCPU) + // + // - Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - + // Available cpu values: 4096 (4 vCPU) + // + // - Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 + // vCPU) + // + // This option requires Linux platform 1.4.0 or later. + // + // - Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 + // vCPU) + // + // This option requires Linux platform 1.4.0 or later. + // + // [ContainerDefinition]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html + Memory *string + + // The Docker networking mode to use for the containers in the task. The valid + // values are none , bridge , awsvpc , and host . If no network mode is specified, + // the default is bridge . + // + // For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For + // Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. + // For Amazon ECS tasks on Amazon EC2 Windows instances, the default network mode or awsvpc can be + // used. If the network mode is set to none , you cannot specify port mappings in your + // container definitions, and the task's containers do not have external + // connectivity. 
The host and awsvpc network modes offer the highest networking + // performance for containers because they use the EC2 network stack instead of the + // virtualized network stack provided by the bridge mode. + // + // With the host and awsvpc network modes, exposed container ports are mapped + // directly to the corresponding host port (for the host network mode) or the + // attached elastic network interface port (for the awsvpc network mode), so you + // cannot take advantage of dynamic host port mappings. + // + // When using the host network mode, you should not run containers using the root + // user (UID 0). It is considered best practice to use a non-root user. + // + // If the network mode is awsvpc , the task is allocated an elastic network + // interface, and you must specify a [NetworkConfiguration]value when you create a service or run a task + // with the task definition. For more information, see [Task Networking]in the Amazon Elastic + // Container Service Developer Guide. + // + // If the network mode is host , you cannot run multiple instantiations of the same + // task on a single container instance when port mappings are used. + // + // [Task Networking]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html + // [NetworkConfiguration]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html + NetworkMode NetworkMode + + // The process namespace to use for the containers in the task. The valid values + // are host or task . On Fargate for Linux containers, the only valid value is task + // . For example, monitoring sidecars might need pidMode to access information + // about other containers running in the same task. + // + // If host is specified, all containers within the tasks that specified the host + // PID mode on the same container instance share the same process namespace with + // the host Amazon EC2 instance. + // + // If task is specified, all containers within the specified task share the same + // process namespace. + // + // If no value is specified, the default is a private namespace for each container. + // + // If the host PID mode is used, there's a heightened risk of undesired process + // namespace exposure. + // + // This parameter is not supported for Windows containers. + // + // This parameter is only supported for tasks that are hosted on Fargate if the + // tasks are using platform version 1.4.0 or later (Linux). This isn't supported + // for Windows containers on Fargate. + PidMode PidMode + + // An array of placement constraint objects to use for tasks. + // + // This parameter isn't supported for tasks run on Fargate. + PlacementConstraints []TaskDefinitionPlacementConstraint + + // The configuration details for the App Mesh proxy. + // + // Your Amazon ECS container instances require at least version 1.26.0 of the + // container agent and at least version 1.26.0-1 of the ecs-init package to use a + // proxy configuration. If your container instances are launched from the Amazon + // ECS optimized AMI version 20190301 or later, they contain the required versions + // of the container agent and ecs-init . For more information, see [Amazon ECS-optimized Linux AMI] in the Amazon + // Elastic Container Service Developer Guide. + // + // [Amazon ECS-optimized Linux AMI]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html + ProxyConfiguration *ProxyConfiguration + + // The Unix timestamp for the time when the task definition was registered. 
+ RegisteredAt *time.Time + + // The principal that registered the task definition. + RegisteredBy *string + + // The container instance attributes required by your task. When an Amazon EC2 + // instance is registered to your cluster, the Amazon ECS container agent assigns + // some standard attributes to the instance. You can apply custom attributes. These + // are specified as key-value pairs using the Amazon ECS console or the [PutAttributes]API. These + // attributes are used when determining task placement for tasks hosted on Amazon + // EC2 instances. For more information, see [Attributes]in the Amazon Elastic Container + // Service Developer Guide. + // + // This parameter isn't supported for tasks run on Fargate. + // + // [PutAttributes]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAttributes.html + // [Attributes]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes + RequiresAttributes []Attribute + + // The task launch types the task definition was validated against. The valid + // values are EC2 , FARGATE , and EXTERNAL . For more information, see [Amazon ECS launch types] in the + // Amazon Elastic Container Service Developer Guide. + // + // [Amazon ECS launch types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + RequiresCompatibilities []Compatibility + + // The revision of the task in a particular family. The revision is a version + // number of a task definition in a family. When you register a task definition for + // the first time, the revision is 1 . Each time that you register a new revision + // of a task definition in the same family, the revision value always increases by + // one. This is even if you deregistered previous revisions in this family. + Revision int32 + + // The operating system that your task definitions are running on. A platform + // family is specified only for tasks using the Fargate launch type. + // + // When you specify a task in a service, this value must match the runtimePlatform + // value of the service. + RuntimePlatform *RuntimePlatform + + // The status of the task definition. + Status TaskDefinitionStatus + + // The full Amazon Resource Name (ARN) of the task definition. + TaskDefinitionArn *string + + // The short name or full Amazon Resource Name (ARN) of the Identity and Access + // Management role that grants containers in the task permission to call Amazon Web + // Services APIs on your behalf. For information about the required IAM roles for + // Amazon ECS, see [IAM roles for Amazon ECS]in the Amazon Elastic Container Service Developer Guide. + // + // [IAM roles for Amazon ECS]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html + TaskRoleArn *string + + // The list of data volume definitions for the task. For more information, see [Using data volumes in tasks] in + // the Amazon Elastic Container Service Developer Guide. + // + // The host and sourcePath parameters aren't supported for tasks run on Fargate. + // + // [Using data volumes in tasks]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html + Volumes []Volume + + noSmithyDocumentSerde +} + +// The constraint on task placement in the task definition. For more information, +// see [Task placement constraints]in the Amazon Elastic Container Service Developer Guide. +// +// Task placement constraints aren't supported for tasks run on Fargate. 
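+//
+// As a hedged sketch only (not part of the generated documentation), a memberOf
+// constraint might be built as follows; the cluster query expression is a
+// placeholder, and the enum constant name is assumed to follow this package's
+// generated naming, with the usual "types" and aws helper imports:
+//
+//	pc := types.TaskDefinitionPlacementConstraint{
+//		Type:       types.TaskDefinitionPlacementConstraintTypeMemberOf,
+//		Expression: aws.String("attribute:ecs.instance-type =~ t3.*"),
+//	}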
+// +// [Task placement constraints]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html +type TaskDefinitionPlacementConstraint struct { + + // A cluster query language expression to apply to the constraint. For more + // information, see [Cluster query language]in the Amazon Elastic Container Service Developer Guide. + // + // [Cluster query language]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html + Expression *string + + // The type of constraint. The MemberOf constraint restricts selection to be from + // a group of valid candidates. + Type TaskDefinitionPlacementConstraintType + + noSmithyDocumentSerde +} + +// The amount of ephemeral storage to allocate for the task. +type TaskEphemeralStorage struct { + + // Specify a Key Management Service key ID to encrypt the ephemeral storage for + // the task. + KmsKeyId *string + + // The total amount, in GiB, of the ephemeral storage to set for the task. The + // minimum supported value is 20 GiB and the maximum supported value is
 200 GiB. + SizeInGiB int32 + + noSmithyDocumentSerde +} + +// The configuration for the Amazon EBS volume that Amazon ECS creates and manages +// on your behalf. These settings are used to create each Amazon EBS volume, with +// one volume created for each task. +type TaskManagedEBSVolumeConfiguration struct { + + // The ARN of the IAM role to associate with this volume. This is the Amazon ECS + // infrastructure IAM role that is used to manage your Amazon Web Services + // infrastructure. We recommend using the Amazon ECS-managed + // AmazonECSInfrastructureRolePolicyForVolumes IAM policy with this role. For more + // information, see [Amazon ECS infrastructure IAM role]in the Amazon ECS Developer Guide. + // + // [Amazon ECS infrastructure IAM role]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/infrastructure_IAM_role.html + // + // This member is required. + RoleArn *string + + // Indicates whether the volume should be encrypted. If no value is specified, + // encryption is turned on by default. This parameter maps 1:1 with the Encrypted + // parameter of the [CreateVolume API]in the Amazon EC2 API Reference. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + Encrypted *bool + + // The Linux filesystem type for the volume. For volumes created from a snapshot, + // you must specify the same filesystem type that the volume was using when the + // snapshot was created. If there is a filesystem type mismatch, the task will fail + // to start. + // + // The available filesystem types are
 ext3 , ext4 , and xfs . If no value is + // specified, the xfs filesystem type is used by default. + FilesystemType TaskFilesystemType + + // The number of I/O operations per second (IOPS). For gp3 , io1 , and io2 + // volumes, this represents the number of IOPS that are provisioned for the volume. + // For gp2 volumes, this represents the baseline performance of the volume and the + // rate at which the volume accumulates I/O credits for bursting. + // + // The following are the supported values for each volume type. + // + // - gp3 : 3,000 - 16,000 IOPS + // + // - io1 : 100 - 64,000 IOPS + // + // - io2 : 100 - 256,000 IOPS + // + // This parameter is required for io1 and io2 volume types. The default for gp3 + // volumes is 3,000 IOPS . This parameter is not supported for st1 , sc1 , or + // standard volume types. + // + // This parameter maps 1:1 with the Iops parameter of the [CreateVolume API] in the Amazon EC2 API + // Reference. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + Iops *int32 + + // The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key + // Management Service key to use for Amazon EBS encryption. When encryption is + // turned on and no Amazon Web Services Key Management Service key is specified, + // the default Amazon Web Services managed key for Amazon EBS volumes is used. This + // parameter maps 1:1 with the KmsKeyId parameter of the [CreateVolume API] in the Amazon EC2 API + // Reference. + // + // Amazon Web Services authenticates the Amazon Web Services Key Management + // Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that + // is invalid, the action can appear to complete, but eventually fails. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + KmsKeyId *string + + // The size of the volume in GiB. You must specify either a volume size or a + // snapshot ID. If you specify a snapshot ID, the snapshot size is used for the + // volume size by default. You can optionally specify a volume size greater than or + // equal to the snapshot size. This parameter maps 1:1 with the Size parameter of + // the [CreateVolume API]in the Amazon EC2 API Reference. + // + // The following are the supported volume size values for each volume type. + // + // - gp2 and gp3 : 1-16,384 + // + // - io1 and io2 : 4-16,384 + // + // - st1 and sc1 : 125-16,384 + // + // - standard : 1-1,024 + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + SizeInGiB *int32 + + // The snapshot that Amazon ECS uses to create the volume. You must specify either + // a snapshot ID or a volume size. This parameter maps 1:1 with the SnapshotId + // parameter of the [CreateVolume API]in the Amazon EC2 API Reference. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + SnapshotId *string + + // The tags to apply to the volume. Amazon ECS applies service-managed tags by + // default. This parameter maps 1:1 with the TagSpecifications.N parameter of the [CreateVolume API] + // in the Amazon EC2 API Reference. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + TagSpecifications []EBSTagSpecification + + // The termination policy for the volume when the task exits. This provides a way + // to control whether Amazon ECS terminates the Amazon EBS volume when the task + // stops. 
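+ //
+ // As an illustrative sketch only (not generated documentation), assuming the
+ // usual "types" and aws helper imports, a policy that keeps the volume after
+ // the task stops might look like:
+ //
+ //	policy := &types.TaskManagedEBSVolumeTerminationPolicy{
+ //		DeleteOnTermination: aws.Bool(false), // retain the Amazon EBS volume
+ //	}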
+ TerminationPolicy *TaskManagedEBSVolumeTerminationPolicy + + // The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 + // MiB/s. This parameter maps 1:1 with the Throughput parameter of the [CreateVolume API] in the + // Amazon EC2 API Reference. + // + // This parameter is only supported for the gp3 volume type. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + Throughput *int32 + + // The volume type. This parameter maps 1:1 with the VolumeType parameter of the [CreateVolume API] + // in the Amazon EC2 API Reference. For more information, see [Amazon EBS volume types]in the Amazon EC2 + // User Guide. + // + // The following are the supported volume types. + // + // - General Purpose SSD: gp2 | gp3 + // + // - Provisioned IOPS SSD: io1 | io2 + // + // - Throughput Optimized HDD: st1 + // + // - Cold HDD: sc1 + // + // - Magnetic: standard + // + // The magnetic volume type is not supported on Fargate. + // + // [CreateVolume API]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html + // [Amazon EBS volume types]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html + VolumeType *string + + noSmithyDocumentSerde +} + +// The termination policy for the Amazon EBS volume when the task exits. For more +// information, see [Amazon ECS volume termination policy]. +// +// [Amazon ECS volume termination policy]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ebs-volumes.html#ebs-volume-types +type TaskManagedEBSVolumeTerminationPolicy struct { + + // Indicates whether the volume should be deleted when the task stops. If a + // value of true is specified, 
Amazon ECS deletes the Amazon EBS volume on your + // behalf when the task goes into the STOPPED state. If no value is specified, the + // 
default value of true is used. When set to false , Amazon ECS leaves the volume + // in your 
account. + // + // This member is required. + DeleteOnTermination *bool + + noSmithyDocumentSerde +} + +// The overrides that are associated with a task. +type TaskOverride struct { + + // One or more container overrides that are sent to a task. + ContainerOverrides []ContainerOverride + + // The CPU override for the task. + Cpu *string + + // The ephemeral storage setting override for the task. + // + // This parameter is only supported for tasks hosted on Fargate that use the + // following platform versions: + // + // - Linux platform version 1.4.0 or later. + // + // - Windows platform version 1.0.0 or later. + EphemeralStorage *EphemeralStorage + + // The Amazon Resource Name (ARN) of the task execution role override for the + // task. For more information, see [Amazon ECS task execution IAM role]in the Amazon Elastic Container Service + // Developer Guide. + // + // [Amazon ECS task execution IAM role]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html + ExecutionRoleArn *string + + // The Elastic Inference accelerator override for the task. + InferenceAcceleratorOverrides []InferenceAcceleratorOverride + + // The memory override for the task. + Memory *string + + // The Amazon Resource Name (ARN) of the role that containers in this task can + // assume. All containers in this task are granted the permissions that are + // specified in this role. For more information, see [IAM Role for Tasks]in the Amazon Elastic + // Container Service Developer Guide. + // + // [IAM Role for Tasks]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html + TaskRoleArn *string + + noSmithyDocumentSerde +} + +// Information about a set of Amazon ECS tasks in either an CodeDeploy or an +// EXTERNAL deployment. An Amazon ECS task set includes details such as the desired +// number of tasks, how many tasks are running, and whether the task set serves +// production traffic. +type TaskSet struct { + + // The capacity provider strategy that are associated with the task set. + CapacityProviderStrategy []CapacityProviderStrategyItem + + // The Amazon Resource Name (ARN) of the cluster that the service that hosts the + // task set exists in. + ClusterArn *string + + // The computed desired count for the task set. This is calculated by multiplying + // the service's desiredCount by the task set's scale percentage. The result is + // always rounded up. For example, if the computed desired count is 1.2, it rounds + // up to 2 tasks. + ComputedDesiredCount int32 + + // The Unix timestamp for the time when the task set was created. + CreatedAt *time.Time + + // The external ID associated with the task set. + // + // If an CodeDeploy deployment created a task set, the externalId parameter + // contains the CodeDeploy deployment ID. + // + // If a task set is created for an external deployment and is associated with a + // service discovery registry, the externalId parameter contains the + // ECS_TASK_SET_EXTERNAL_ID Cloud Map attribute. + ExternalId *string + + // The Fargate ephemeral storage settings for the task set. + FargateEphemeralStorage *DeploymentEphemeralStorage + + // The ID of the task set. + Id *string + + // The launch type the tasks in the task set are using. For more information, see [Amazon ECS launch types] + // in the Amazon Elastic Container Service Developer Guide. 
+ // + // [Amazon ECS launch types]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + LaunchType LaunchType + + // Details on a load balancer that are used with a task set. + LoadBalancers []LoadBalancer + + // The network configuration for the task set. + NetworkConfiguration *NetworkConfiguration + + // The number of tasks in the task set that are in the PENDING status during a + // deployment. A task in the PENDING state is preparing to enter the RUNNING + // state. A task set enters the PENDING status when it launches for the first time + // or when it's restarted after being in the STOPPED state. + PendingCount int32 + + // The operating system that your tasks in the set are running on. A platform + // family is specified only for tasks that use the Fargate launch type. + // + // All tasks in the set must have the same value. + PlatformFamily *string + + // The Fargate platform version where the tasks in the task set are running. A + // platform version is only specified for tasks run on Fargate. For more + // information, see [Fargate platform versions]in the Amazon Elastic Container Service Developer Guide. + // + // [Fargate platform versions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html + PlatformVersion *string + + // The number of tasks in the task set that are in the RUNNING status during a + // deployment. A task in the RUNNING state is running and ready for use. + RunningCount int32 + + // A floating-point percentage of your desired number of tasks to place and keep + // running in the task set. + Scale *Scale + + // The Amazon Resource Name (ARN) of the service the task set exists in. + ServiceArn *string + + // The details for the service discovery registries to assign to this task set. + // For more information, see [Service discovery]. + // + // [Service discovery]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html + ServiceRegistries []ServiceRegistry + + // The stability status. This indicates whether the task set has reached a steady + // state. If the following conditions are met, the task set are in STEADY_STATE : + // + // - The task runningCount is equal to the computedDesiredCount . + // + // - The pendingCount is 0 . + // + // - There are no tasks that are running on container instances in the DRAINING + // status. + // + // - All tasks are reporting a healthy status from the load balancers, service + // discovery, and container health checks. + // + // If any of those conditions aren't met, the stability status returns STABILIZING . + StabilityStatus StabilityStatus + + // The Unix timestamp for the time when the task set stability status was + // retrieved. + StabilityStatusAt *time.Time + + // The tag specified when a task set is started. If an CodeDeploy deployment + // created the task set, the startedBy parameter is CODE_DEPLOY . If an external + // deployment created the task set, the startedBy field isn't used. + StartedBy *string + + // The status of the task set. The following describes each state. + // + // PRIMARY The task set is serving production traffic. + // + // ACTIVE The task set isn't serving production traffic. + // + // DRAINING The tasks in the task set are being stopped, and their corresponding + // targets are being deregistered from their target group. + Status *string + + // The metadata that you apply to the task set to help you categorize and organize + // them. Each tag consists of a key and an optional value. You define both. 
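+ //
+ // As an illustrative sketch only (not generated documentation), with the usual
+ // "types" and aws helper imports, a pair of placeholder tags might be declared
+ // as:
+ //
+ //	tags := []types.Tag{
+ //		{Key: aws.String("team"), Value: aws.String("payments")},
+ //		{Key: aws.String("environment"), Value: aws.String("production")},
+ //	}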
+ // + // The following basic restrictions apply to tags: + // + // - Maximum number of tags per resource - 50 + // + // - For each resource, each tag key must be unique, and each tag key can have + // only one value. + // + // - Maximum key length - 128 Unicode characters in UTF-8 + // + // - Maximum value length - 256 Unicode characters in UTF-8 + // + // - If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. + // + // - Tag keys and values are case-sensitive. + // + // - Do not use aws: , AWS: , or any upper or lowercase combination of such as a + // prefix for either keys or values as it is reserved for Amazon Web Services use. + // You cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. + Tags []Tag + + // The task definition that the task set is using. + TaskDefinition *string + + // The Amazon Resource Name (ARN) of the task set. + TaskSetArn *string + + // The Unix timestamp for the time when the task set was last updated. + UpdatedAt *time.Time + + noSmithyDocumentSerde +} + +// Configuration settings for the task volume that was configuredAtLaunch that +// weren't set during RegisterTaskDef . +type TaskVolumeConfiguration struct { + + // The name of the volume. This value must match the volume name from the Volume + // object in the task definition. + // + // This member is required. + Name *string + + // The configuration for the Amazon EBS volume that Amazon ECS creates and manages + // on your behalf. These settings are used to create each Amazon EBS volume, with + // one volume created for each task. The Amazon EBS volumes are visible in your + // account in the Amazon EC2 console once they are created. + ManagedEBSVolume *TaskManagedEBSVolumeConfiguration + + noSmithyDocumentSerde +} + +// An object that represents the timeout configurations for Service Connect. +// +// If idleTimeout is set to a time that is less than perRequestTimeout , the +// connection will close when the idleTimeout is reached and not the +// perRequestTimeout . +type TimeoutConfiguration struct { + + // The amount of time in seconds a connection will stay active while idle. A value + // of 0 can be set to disable idleTimeout . + // + // The idleTimeout default for HTTP / HTTP2 / GRPC is 5 minutes. + // + // The idleTimeout default for TCP is 1 hour. + IdleTimeoutSeconds *int32 + + // The amount of time waiting for the upstream to respond with a complete response + // per request. A value of 0 can be set to disable perRequestTimeout . + // perRequestTimeout can only be set if Service Connect appProtocol isn't TCP . + // Only idleTimeout is allowed for TCP appProtocol . + PerRequestTimeoutSeconds *int32 + + noSmithyDocumentSerde +} + +// The container path, mount options, and size of the tmpfs mount. +type Tmpfs struct { + + // The absolute file path where the tmpfs volume is to be mounted. + // + // This member is required. + ContainerPath *string + + // The maximum size (in MiB) of the tmpfs volume. + // + // This member is required. + Size int32 + + // The list of tmpfs volume mount options. 
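+ //
+ // As a hedged sketch only (not generated documentation), assuming the usual
+ // "types" and aws helper imports, a tmpfs mount that uses a few of the options
+ // listed below might be defined as:
+ //
+ //	tmpfs := types.Tmpfs{
+ //		ContainerPath: aws.String("/scratch"),
+ //		Size:          256,
+ //		MountOptions:  []string{"rw", "noexec", "nosuid"},
+ //	}
+ //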
+ // + // Valid values: "defaults" | "ro" | "rw" | "suid" | "nosuid" | "dev" | "nodev" | + // "exec" | "noexec" | "sync" | "async" | "dirsync" | "remount" | "mand" | "nomand" + // | "atime" | "noatime" | "diratime" | "nodiratime" | "bind" | "rbind" | + // "unbindable" | "runbindable" | "private" | "rprivate" | "shared" | "rshared" | + // "slave" | "rslave" | "relatime" | "norelatime" | "strictatime" | "nostrictatime" + // | "mode" | "uid" | "gid" | "nr_inodes" | "nr_blocks" | "mpol" + MountOptions []string + + noSmithyDocumentSerde +} + +// The ulimit settings to pass to the container. +// +// Amazon ECS tasks hosted on Fargate use the default resource limit values set by +// the operating system with the exception of the nofile resource limit parameter +// which Fargate overrides. The nofile resource limit sets a restriction on the +// number of open files that a container can use. The default nofile soft limit is +// 65535 and the default hard limit is 65535 . +// +// You can specify the ulimit settings for a container in a task definition. +type Ulimit struct { + + // The hard limit for the ulimit type. The value can be specified in bytes, + // seconds, or as a count, depending on the type of the ulimit . + // + // This member is required. + HardLimit int32 + + // The type of the ulimit . + // + // This member is required. + Name UlimitName + + // The soft limit for the ulimit type. The value can be specified in bytes, + // seconds, or as a count, depending on the type of the ulimit . + // + // This member is required. + SoftLimit int32 + + noSmithyDocumentSerde +} + +// The Docker and Amazon ECS container agent version information about a container +// instance. +type VersionInfo struct { + + // The Git commit hash for the Amazon ECS container agent build on the [amazon-ecs-agent] GitHub + // repository. + // + // [amazon-ecs-agent]: https://github.com/aws/amazon-ecs-agent/commits/master + AgentHash *string + + // The version number of the Amazon ECS container agent. + AgentVersion *string + + // The Docker version that's running on the container instance. + DockerVersion *string + + noSmithyDocumentSerde +} + +// The data volume configuration for tasks launched using this task definition. +// Specifying a volume configuration in a task definition is optional. The volume +// configuration may contain multiple volumes but only one volume configured at +// launch is supported. Each volume defined in the volume configuration may only +// specify a name and one of either configuredAtLaunch , dockerVolumeConfiguration +// , efsVolumeConfiguration , fsxWindowsFileServerVolumeConfiguration , or host . +// If an empty volume configuration is specified, by default Amazon ECS uses a host +// volume. For more information, see [Using data volumes in tasks]. +// +// [Using data volumes in tasks]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html +type Volume struct { + + // Indicates whether the volume should be configured at launch time. This is used + // to create Amazon EBS volumes for standalone tasks or tasks created as part of a + // service. Each task definition revision may only have one volume configured at + // launch in the volume configuration. + // + // To configure a volume at launch time, use this task definition revision and + // specify a volumeConfigurations object when calling the CreateService , + // UpdateService , RunTask or StartTask APIs. + ConfiguredAtLaunch *bool + + // This parameter is specified when you use Docker volumes. 
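// Illustrative sketch (editor's addition, not generated code): registering a
// task definition whose container raises the nofile ulimit and mounts a tmpfs,
// using the Ulimit and Tmpfs types documented above. The family, container
// name, and image are placeholders; tmpfs applies to the EC2 launch type.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecs.NewFromConfig(cfg)

	out, err := client.RegisterTaskDefinition(context.TODO(), &ecs.RegisterTaskDefinitionInput{
		Family: aws.String("example-family"), // placeholder
		ContainerDefinitions: []types.ContainerDefinition{{
			Name:      aws.String("app"),                                        // placeholder
			Image:     aws.String("public.ecr.aws/docker/library/nginx:latest"), // placeholder image
			Essential: aws.Bool(true),
			Memory:    aws.Int32(512),
			Ulimits: []types.Ulimit{{
				Name:      types.UlimitNameNofile,
				SoftLimit: 1024,
				HardLimit: 4096,
			}},
			LinuxParameters: &types.LinuxParameters{
				Tmpfs: []types.Tmpfs{{
					ContainerPath: aws.String("/tmp/scratch"),
					Size:          64, // MiB
					MountOptions:  []string{"rw", "noexec"},
				}},
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("registered:", aws.ToString(out.TaskDefinition.TaskDefinitionArn))
}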
+ // + // Windows containers only support the use of the local driver. To use bind + // mounts, specify the host parameter instead. + // + // Docker volumes aren't supported by tasks run on Fargate. + DockerVolumeConfiguration *DockerVolumeConfiguration + + // This parameter is specified when you use an Amazon Elastic File System file + // system for task storage. + EfsVolumeConfiguration *EFSVolumeConfiguration + + // This parameter is specified when you use Amazon FSx for Windows File Server + // file system for task storage. + FsxWindowsFileServerVolumeConfiguration *FSxWindowsFileServerVolumeConfiguration + + // This parameter is specified when you use bind mount host volumes. The contents + // of the host parameter determine whether your bind mount host volume persists on + // the host container instance and where it's stored. If the host parameter is + // empty, then the Docker daemon assigns a host path for your data volume. However, + // the data isn't guaranteed to persist after the containers that are associated + // with it stop running. + // + // Windows containers can mount whole directories on the same drive as + // $env:ProgramData . Windows containers can't mount directories on a different + // drive, and mount point can't be across drives. For example, you can mount + // C:\my\path:C:\my\path and D:\:D:\ , but not D:\my\path:C:\my\path or + // D:\:C:\my\path . + Host *HostVolumeProperties + + // The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, + // underscores, and hyphens are allowed. + // + // When using a volume configured at launch, the name is required and must also be + // specified as the volume name in the ServiceVolumeConfiguration or + // TaskVolumeConfiguration parameter when creating your service or standalone task. + // + // For all other types of volumes, this name is referenced in the sourceVolume + // parameter of the mountPoints object in the container definition. + // + // When a volume is using the efsVolumeConfiguration , the name is required. + Name *string + + noSmithyDocumentSerde +} + +// Details on a data volume from another container in the same task definition. +type VolumeFrom struct { + + // If this value is true , the container has read-only access to the volume. If + // this value is false , then the container can write to the volume. The default + // value is false . + ReadOnly *bool + + // The name of another container within the same task definition to mount volumes + // from. + SourceContainer *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/aws-sdk-go-v2/service/ecs/validators.go b/aws-sdk-go-v2/service/ecs/validators.go new file mode 100644 index 00000000000..443c9788f92 --- /dev/null +++ b/aws-sdk-go-v2/service/ecs/validators.go @@ -0,0 +1,3266 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
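// Illustrative sketch (editor's addition, not generated code): running a task
// whose task definition declares a volume with configuredAtLaunch, supplying
// the per-launch Amazon EBS settings through the TaskVolumeConfiguration type
// documented above. The cluster, task definition, volume name, and
// infrastructure role ARN are placeholders; launch type and network
// configuration are omitted because they depend on the task definition.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecs.NewFromConfig(cfg)

	_, err = client.RunTask(context.TODO(), &ecs.RunTaskInput{
		Cluster:        aws.String("my-cluster"),       // placeholder
		TaskDefinition: aws.String("example-family:1"), // placeholder family:revision
		VolumeConfigurations: []types.TaskVolumeConfiguration{{
			// Must match a volume declared with configuredAtLaunch: true
			// in the task definition, as described above.
			Name: aws.String("data"), // placeholder volume name
			ManagedEBSVolume: &types.TaskManagedEBSVolumeConfiguration{
				RoleArn:        aws.String("arn:aws:iam::123456789012:role/ecsInfrastructureRole"), // placeholder
				SizeInGiB:      aws.Int32(20),
				FilesystemType: types.TaskFilesystemTypeExt4,
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}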
+ + +package ecs + +import ( + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithy "github.com/aws/smithy-go" + "github.com/aws/aws-sdk-go-v2/service/ecs/types" +) + +type validateOpCreateCapacityProvider struct { +} + +func (*validateOpCreateCapacityProvider) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateCapacityProvider) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateCapacityProviderInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateCapacityProviderInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateCluster struct { +} + +func (*validateOpCreateCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateService struct { +} + +func (*validateOpCreateService) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateService) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateServiceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateServiceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateTaskSet struct { +} + +func (*validateOpCreateTaskSet) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateTaskSet) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateTaskSetInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateTaskSetInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteAccountSetting struct { +} + +func (*validateOpDeleteAccountSetting) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteAccountSetting) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteAccountSettingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteAccountSettingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteAttributes struct { +} + +func 
(*validateOpDeleteAttributes) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteAttributes) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteAttributesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteAttributesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteCapacityProvider struct { +} + +func (*validateOpDeleteCapacityProvider) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteCapacityProvider) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteCapacityProviderInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteCapacityProviderInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteCluster struct { +} + +func (*validateOpDeleteCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteService struct { +} + +func (*validateOpDeleteService) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteService) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteServiceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteServiceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteTaskDefinitions struct { +} + +func (*validateOpDeleteTaskDefinitions) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteTaskDefinitions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteTaskDefinitionsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteTaskDefinitionsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteTaskSet struct { +} + +func (*validateOpDeleteTaskSet) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteTaskSet) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out 
middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteTaskSetInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteTaskSetInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeregisterContainerInstance struct { +} + +func (*validateOpDeregisterContainerInstance) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeregisterContainerInstance) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeregisterContainerInstanceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeregisterContainerInstanceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeregisterTaskDefinition struct { +} + +func (*validateOpDeregisterTaskDefinition) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeregisterTaskDefinition) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeregisterTaskDefinitionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeregisterTaskDefinitionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeContainerInstances struct { +} + +func (*validateOpDescribeContainerInstances) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeContainerInstances) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeContainerInstancesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeContainerInstancesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeServices struct { +} + +func (*validateOpDescribeServices) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeServices) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeServicesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeServicesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeTaskDefinition struct { +} + +func (*validateOpDescribeTaskDefinition) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeTaskDefinition) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := 
in.Parameters.(*DescribeTaskDefinitionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeTaskDefinitionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeTaskSets struct { +} + +func (*validateOpDescribeTaskSets) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeTaskSets) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeTaskSetsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeTaskSetsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeTasks struct { +} + +func (*validateOpDescribeTasks) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeTasks) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeTasksInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeTasksInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpExecuteCommand struct { +} + +func (*validateOpExecuteCommand) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpExecuteCommand) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ExecuteCommandInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpExecuteCommandInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetTaskProtection struct { +} + +func (*validateOpGetTaskProtection) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetTaskProtection) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetTaskProtectionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetTaskProtectionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAttributes struct { +} + +func (*validateOpListAttributes) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAttributes) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAttributesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAttributesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, 
in) +} + +type validateOpListServicesByNamespace struct { +} + +func (*validateOpListServicesByNamespace) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListServicesByNamespace) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListServicesByNamespaceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListServicesByNamespaceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTagsForResource struct { +} + +func (*validateOpListTagsForResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTagsForResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTagsForResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutAccountSettingDefault struct { +} + +func (*validateOpPutAccountSettingDefault) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutAccountSettingDefault) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutAccountSettingDefaultInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutAccountSettingDefaultInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutAccountSetting struct { +} + +func (*validateOpPutAccountSetting) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutAccountSetting) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutAccountSettingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutAccountSettingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutAttributes struct { +} + +func (*validateOpPutAttributes) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutAttributes) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutAttributesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutAttributesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutClusterCapacityProviders struct { +} + +func (*validateOpPutClusterCapacityProviders) ID() string { + return "OperationInputValidation" +} + +func 
(m *validateOpPutClusterCapacityProviders) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutClusterCapacityProvidersInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutClusterCapacityProvidersInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRegisterContainerInstance struct { +} + +func (*validateOpRegisterContainerInstance) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRegisterContainerInstance) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RegisterContainerInstanceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRegisterContainerInstanceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRegisterTaskDefinition struct { +} + +func (*validateOpRegisterTaskDefinition) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRegisterTaskDefinition) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RegisterTaskDefinitionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRegisterTaskDefinitionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRunTask struct { +} + +func (*validateOpRunTask) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRunTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RunTaskInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRunTaskInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartTask struct { +} + +func (*validateOpStartTask) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartTaskInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartTaskInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStopTask struct { +} + +func (*validateOpStopTask) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStopTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := 
in.Parameters.(*StopTaskInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStopTaskInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpSubmitAttachmentStateChanges struct { +} + +func (*validateOpSubmitAttachmentStateChanges) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSubmitAttachmentStateChanges) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SubmitAttachmentStateChangesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpSubmitAttachmentStateChangesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpSubmitTaskStateChange struct { +} + +func (*validateOpSubmitTaskStateChange) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSubmitTaskStateChange) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SubmitTaskStateChangeInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpSubmitTaskStateChangeInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateCapacityProvider struct { +} + +func (*validateOpUpdateCapacityProvider) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateCapacityProvider) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateCapacityProviderInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateCapacityProviderInput(input); err 
!= nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateCluster struct { +} + +func (*validateOpUpdateCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateClusterSettings struct { +} + +func (*validateOpUpdateClusterSettings) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateClusterSettings) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateClusterSettingsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateClusterSettingsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateContainerAgent struct { +} + +func (*validateOpUpdateContainerAgent) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateContainerAgent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateContainerAgentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateContainerAgentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateContainerInstancesState struct { +} + +func (*validateOpUpdateContainerInstancesState) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateContainerInstancesState) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateContainerInstancesStateInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateContainerInstancesStateInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateService struct { +} + +func (*validateOpUpdateService) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateService) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateServiceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateServiceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateServicePrimaryTaskSet struct { +} + +func 
(*validateOpUpdateServicePrimaryTaskSet) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateServicePrimaryTaskSet) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateServicePrimaryTaskSetInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateServicePrimaryTaskSetInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateTaskProtection struct { +} + +func (*validateOpUpdateTaskProtection) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateTaskProtection) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateTaskProtectionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateTaskProtectionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateTaskSet struct { +} + +func (*validateOpUpdateTaskSet) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateTaskSet) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateTaskSetInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateTaskSetInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpCreateCapacityProviderValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateCapacityProvider{}, middleware.After) +} + +func addOpCreateClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateCluster{}, middleware.After) +} + +func addOpCreateServiceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateService{}, middleware.After) +} + +func addOpCreateTaskSetValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateTaskSet{}, middleware.After) +} + +func addOpDeleteAccountSettingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteAccountSetting{}, middleware.After) +} + +func addOpDeleteAttributesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteAttributes{}, middleware.After) +} + +func addOpDeleteCapacityProviderValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteCapacityProvider{}, middleware.After) +} + +func addOpDeleteClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteCluster{}, middleware.After) +} + +func addOpDeleteServiceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteService{}, middleware.After) +} + +func addOpDeleteTaskDefinitionsValidationMiddleware(stack *middleware.Stack) error { + 
return stack.Initialize.Add(&validateOpDeleteTaskDefinitions{}, middleware.After) +} + +func addOpDeleteTaskSetValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteTaskSet{}, middleware.After) +} + +func addOpDeregisterContainerInstanceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeregisterContainerInstance{}, middleware.After) +} + +func addOpDeregisterTaskDefinitionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeregisterTaskDefinition{}, middleware.After) +} + +func addOpDescribeContainerInstancesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeContainerInstances{}, middleware.After) +} + +func addOpDescribeServicesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeServices{}, middleware.After) +} + +func addOpDescribeTaskDefinitionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeTaskDefinition{}, middleware.After) +} + +func addOpDescribeTaskSetsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeTaskSets{}, middleware.After) +} + +func addOpDescribeTasksValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeTasks{}, middleware.After) +} + +func addOpExecuteCommandValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpExecuteCommand{}, middleware.After) +} + +func addOpGetTaskProtectionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetTaskProtection{}, middleware.After) +} + +func addOpListAttributesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAttributes{}, middleware.After) +} + +func addOpListServicesByNamespaceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListServicesByNamespace{}, middleware.After) +} + +func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) +} + +func addOpPutAccountSettingDefaultValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutAccountSettingDefault{}, middleware.After) +} + +func addOpPutAccountSettingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutAccountSetting{}, middleware.After) +} + +func addOpPutAttributesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutAttributes{}, middleware.After) +} + +func addOpPutClusterCapacityProvidersValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutClusterCapacityProviders{}, middleware.After) +} + +func addOpRegisterContainerInstanceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRegisterContainerInstance{}, middleware.After) +} + +func addOpRegisterTaskDefinitionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRegisterTaskDefinition{}, middleware.After) +} + +func addOpRunTaskValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRunTask{}, middleware.After) +} + +func addOpStartTaskValidationMiddleware(stack 
*middleware.Stack) error { + return stack.Initialize.Add(&validateOpStartTask{}, middleware.After) +} + +func addOpStopTaskValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpStopTask{}, middleware.After) +} + +func addOpSubmitAttachmentStateChangesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpSubmitAttachmentStateChanges{}, middleware.After) +} + +func addOpSubmitTaskStateChangeValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpSubmitTaskStateChange{}, middleware.After) +} + +func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) +} + +func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) +} + +func addOpUpdateCapacityProviderValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateCapacityProvider{}, middleware.After) +} + +func addOpUpdateClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateCluster{}, middleware.After) +} + +func addOpUpdateClusterSettingsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateClusterSettings{}, middleware.After) +} + +func addOpUpdateContainerAgentValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateContainerAgent{}, middleware.After) +} + +func addOpUpdateContainerInstancesStateValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateContainerInstancesState{}, middleware.After) +} + +func addOpUpdateServiceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateService{}, middleware.After) +} + +func addOpUpdateServicePrimaryTaskSetValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateServicePrimaryTaskSet{}, middleware.After) +} + +func addOpUpdateTaskProtectionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateTaskProtection{}, middleware.After) +} + +func addOpUpdateTaskSetValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateTaskSet{}, middleware.After) +} + +func validateAttachmentStateChange(v *types.AttachmentStateChange) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AttachmentStateChange"} + if v.AttachmentArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("AttachmentArn")) + } + if v.Status == nil { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAttachmentStateChanges(v []types.AttachmentStateChange) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AttachmentStateChanges"} + for i := range v { + if err := validateAttachmentStateChange(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAttribute(v *types.Attribute) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Attribute"} + if 
v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAttributes(v []types.Attribute) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Attributes"} + for i := range v { + if err := validateAttribute(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAutoScalingGroupProvider(v *types.AutoScalingGroupProvider) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AutoScalingGroupProvider"} + if v.AutoScalingGroupArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("AutoScalingGroupArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAwsVpcConfiguration(v *types.AwsVpcConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AwsVpcConfiguration"} + if v.Subnets == nil { + invalidParams.Add(smithy.NewErrParamRequired("Subnets")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCapacityProviderStrategy(v []types.CapacityProviderStrategyItem) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CapacityProviderStrategy"} + for i := range v { + if err := validateCapacityProviderStrategyItem(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCapacityProviderStrategyItem(v *types.CapacityProviderStrategyItem) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CapacityProviderStrategyItem"} + if v.CapacityProvider == nil { + invalidParams.Add(smithy.NewErrParamRequired("CapacityProvider")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateClusterServiceConnectDefaultsRequest(v *types.ClusterServiceConnectDefaultsRequest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ClusterServiceConnectDefaultsRequest"} + if v.Namespace == nil { + invalidParams.Add(smithy.NewErrParamRequired("Namespace")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateContainerDefinition(v *types.ContainerDefinition) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ContainerDefinition"} + if v.RepositoryCredentials != nil { + if err := validateRepositoryCredentials(v.RepositoryCredentials); err != nil { + invalidParams.AddNested("RepositoryCredentials", err.(smithy.InvalidParamsError)) + } + } + if v.RestartPolicy != nil { + if err := validateContainerRestartPolicy(v.RestartPolicy); err != nil { + invalidParams.AddNested("RestartPolicy", err.(smithy.InvalidParamsError)) + } + } + if v.EnvironmentFiles != nil { + if err := validateEnvironmentFiles(v.EnvironmentFiles); err != nil { + invalidParams.AddNested("EnvironmentFiles", err.(smithy.InvalidParamsError)) + } + } + if v.LinuxParameters != nil { + if err := validateLinuxParameters(v.LinuxParameters); err != nil { + invalidParams.AddNested("LinuxParameters", err.(smithy.InvalidParamsError)) + 
} + } + if v.Secrets != nil { + if err := validateSecretList(v.Secrets); err != nil { + invalidParams.AddNested("Secrets", err.(smithy.InvalidParamsError)) + } + } + if v.DependsOn != nil { + if err := validateContainerDependencies(v.DependsOn); err != nil { + invalidParams.AddNested("DependsOn", err.(smithy.InvalidParamsError)) + } + } + if v.ExtraHosts != nil { + if err := validateHostEntryList(v.ExtraHosts); err != nil { + invalidParams.AddNested("ExtraHosts", err.(smithy.InvalidParamsError)) + } + } + if v.Ulimits != nil { + if err := validateUlimitList(v.Ulimits); err != nil { + invalidParams.AddNested("Ulimits", err.(smithy.InvalidParamsError)) + } + } + if v.LogConfiguration != nil { + if err := validateLogConfiguration(v.LogConfiguration); err != nil { + invalidParams.AddNested("LogConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.HealthCheck != nil { + if err := validateHealthCheck(v.HealthCheck); err != nil { + invalidParams.AddNested("HealthCheck", err.(smithy.InvalidParamsError)) + } + } + if v.ResourceRequirements != nil { + if err := validateResourceRequirements(v.ResourceRequirements); err != nil { + invalidParams.AddNested("ResourceRequirements", err.(smithy.InvalidParamsError)) + } + } + if v.FirelensConfiguration != nil { + if err := validateFirelensConfiguration(v.FirelensConfiguration); err != nil { + invalidParams.AddNested("FirelensConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateContainerDefinitions(v []types.ContainerDefinition) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ContainerDefinitions"} + for i := range v { + if err := validateContainerDefinition(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateContainerDependencies(v []types.ContainerDependency) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ContainerDependencies"} + for i := range v { + if err := validateContainerDependency(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateContainerDependency(v *types.ContainerDependency) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ContainerDependency"} + if v.ContainerName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ContainerName")) + } + if len(v.Condition) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Condition")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateContainerOverride(v *types.ContainerOverride) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ContainerOverride"} + if v.EnvironmentFiles != nil { + if err := validateEnvironmentFiles(v.EnvironmentFiles); err != nil { + invalidParams.AddNested("EnvironmentFiles", err.(smithy.InvalidParamsError)) + } + } + if v.ResourceRequirements != nil { + if err := validateResourceRequirements(v.ResourceRequirements); err != nil { + invalidParams.AddNested("ResourceRequirements", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateContainerOverrides(v []types.ContainerOverride) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ContainerOverrides"} + for i := range v { + if err := validateContainerOverride(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateContainerRestartPolicy(v *types.ContainerRestartPolicy) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ContainerRestartPolicy"} + if v.Enabled == nil { + invalidParams.Add(smithy.NewErrParamRequired("Enabled")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDeploymentAlarms(v *types.DeploymentAlarms) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeploymentAlarms"} + if v.AlarmNames == nil { + invalidParams.Add(smithy.NewErrParamRequired("AlarmNames")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDeploymentCircuitBreaker(v *types.DeploymentCircuitBreaker) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeploymentCircuitBreaker"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDeploymentConfiguration(v *types.DeploymentConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeploymentConfiguration"} + if v.DeploymentCircuitBreaker != nil { + if err := validateDeploymentCircuitBreaker(v.DeploymentCircuitBreaker); err != nil { + invalidParams.AddNested("DeploymentCircuitBreaker", err.(smithy.InvalidParamsError)) + } + } + if v.Alarms != nil { + if err := validateDeploymentAlarms(v.Alarms); err != nil { + invalidParams.AddNested("Alarms", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDeploymentController(v *types.DeploymentController) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeploymentController"} + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDevice(v *types.Device) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Device"} + if v.HostPath == nil { + invalidParams.Add(smithy.NewErrParamRequired("HostPath")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDevicesList(v []types.Device) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DevicesList"} + for i := range v { + if err := validateDevice(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateEBSTagSpecification(v *types.EBSTagSpecification) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EBSTagSpecification"} + if len(v.ResourceType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ResourceType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateEBSTagSpecifications(v []types.EBSTagSpecification) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EBSTagSpecifications"} + for i := range v { + if err := validateEBSTagSpecification(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateEFSVolumeConfiguration(v *types.EFSVolumeConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EFSVolumeConfiguration"} + if v.FileSystemId == nil { + invalidParams.Add(smithy.NewErrParamRequired("FileSystemId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateEnvironmentFile(v *types.EnvironmentFile) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EnvironmentFile"} + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateEnvironmentFiles(v []types.EnvironmentFile) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EnvironmentFiles"} + for i := range v { + if err := validateEnvironmentFile(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateEphemeralStorage(v *types.EphemeralStorage) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EphemeralStorage"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateFirelensConfiguration(v *types.FirelensConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "FirelensConfiguration"} + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateFSxWindowsFileServerAuthorizationConfig(v *types.FSxWindowsFileServerAuthorizationConfig) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "FSxWindowsFileServerAuthorizationConfig"} + if v.CredentialsParameter == nil { + invalidParams.Add(smithy.NewErrParamRequired("CredentialsParameter")) + } + if v.Domain == nil { + invalidParams.Add(smithy.NewErrParamRequired("Domain")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateFSxWindowsFileServerVolumeConfiguration(v *types.FSxWindowsFileServerVolumeConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "FSxWindowsFileServerVolumeConfiguration"} + if v.FileSystemId == nil { + invalidParams.Add(smithy.NewErrParamRequired("FileSystemId")) + } + if v.RootDirectory == nil { + invalidParams.Add(smithy.NewErrParamRequired("RootDirectory")) + } + if v.AuthorizationConfig == nil { + invalidParams.Add(smithy.NewErrParamRequired("AuthorizationConfig")) + } else if v.AuthorizationConfig != nil { + if err := validateFSxWindowsFileServerAuthorizationConfig(v.AuthorizationConfig); err != nil { + invalidParams.AddNested("AuthorizationConfig", 
err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateHealthCheck(v *types.HealthCheck) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HealthCheck"} + if v.Command == nil { + invalidParams.Add(smithy.NewErrParamRequired("Command")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateHostEntry(v *types.HostEntry) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HostEntry"} + if v.Hostname == nil { + invalidParams.Add(smithy.NewErrParamRequired("Hostname")) + } + if v.IpAddress == nil { + invalidParams.Add(smithy.NewErrParamRequired("IpAddress")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateHostEntryList(v []types.HostEntry) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HostEntryList"} + for i := range v { + if err := validateHostEntry(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInferenceAccelerator(v *types.InferenceAccelerator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InferenceAccelerator"} + if v.DeviceName == nil { + invalidParams.Add(smithy.NewErrParamRequired("DeviceName")) + } + if v.DeviceType == nil { + invalidParams.Add(smithy.NewErrParamRequired("DeviceType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInferenceAccelerators(v []types.InferenceAccelerator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InferenceAccelerators"} + for i := range v { + if err := validateInferenceAccelerator(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLinuxParameters(v *types.LinuxParameters) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LinuxParameters"} + if v.Devices != nil { + if err := validateDevicesList(v.Devices); err != nil { + invalidParams.AddNested("Devices", err.(smithy.InvalidParamsError)) + } + } + if v.Tmpfs != nil { + if err := validateTmpfsList(v.Tmpfs); err != nil { + invalidParams.AddNested("Tmpfs", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLogConfiguration(v *types.LogConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LogConfiguration"} + if len(v.LogDriver) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("LogDriver")) + } + if v.SecretOptions != nil { + if err := validateSecretList(v.SecretOptions); err != nil { + invalidParams.AddNested("SecretOptions", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateManagedAgentStateChange(v *types.ManagedAgentStateChange) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ManagedAgentStateChange"} + if v.ContainerName == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("ContainerName")) + } + if len(v.ManagedAgentName) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ManagedAgentName")) + } + if v.Status == nil { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateManagedAgentStateChanges(v []types.ManagedAgentStateChange) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ManagedAgentStateChanges"} + for i := range v { + if err := validateManagedAgentStateChange(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateNetworkConfiguration(v *types.NetworkConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "NetworkConfiguration"} + if v.AwsvpcConfiguration != nil { + if err := validateAwsVpcConfiguration(v.AwsvpcConfiguration); err != nil { + invalidParams.AddNested("AwsvpcConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validatePlatformDevice(v *types.PlatformDevice) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PlatformDevice"} + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validatePlatformDevices(v []types.PlatformDevice) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PlatformDevices"} + for i := range v { + if err := validatePlatformDevice(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateProxyConfiguration(v *types.ProxyConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ProxyConfiguration"} + if v.ContainerName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ContainerName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRepositoryCredentials(v *types.RepositoryCredentials) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RepositoryCredentials"} + if v.CredentialsParameter == nil { + invalidParams.Add(smithy.NewErrParamRequired("CredentialsParameter")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateResourceRequirement(v *types.ResourceRequirement) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ResourceRequirement"} + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateResourceRequirements(v []types.ResourceRequirement) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ResourceRequirements"} + for i := range v { + if err := 
validateResourceRequirement(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSecret(v *types.Secret) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Secret"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.ValueFrom == nil { + invalidParams.Add(smithy.NewErrParamRequired("ValueFrom")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSecretList(v []types.Secret) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SecretList"} + for i := range v { + if err := validateSecret(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServiceConnectClientAlias(v *types.ServiceConnectClientAlias) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServiceConnectClientAlias"} + if v.Port == nil { + invalidParams.Add(smithy.NewErrParamRequired("Port")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServiceConnectClientAliasList(v []types.ServiceConnectClientAlias) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServiceConnectClientAliasList"} + for i := range v { + if err := validateServiceConnectClientAlias(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServiceConnectConfiguration(v *types.ServiceConnectConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServiceConnectConfiguration"} + if v.Services != nil { + if err := validateServiceConnectServiceList(v.Services); err != nil { + invalidParams.AddNested("Services", err.(smithy.InvalidParamsError)) + } + } + if v.LogConfiguration != nil { + if err := validateLogConfiguration(v.LogConfiguration); err != nil { + invalidParams.AddNested("LogConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServiceConnectService(v *types.ServiceConnectService) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServiceConnectService"} + if v.PortName == nil { + invalidParams.Add(smithy.NewErrParamRequired("PortName")) + } + if v.ClientAliases != nil { + if err := validateServiceConnectClientAliasList(v.ClientAliases); err != nil { + invalidParams.AddNested("ClientAliases", err.(smithy.InvalidParamsError)) + } + } + if v.Tls != nil { + if err := validateServiceConnectTlsConfiguration(v.Tls); err != nil { + invalidParams.AddNested("Tls", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServiceConnectServiceList(v []types.ServiceConnectService) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServiceConnectServiceList"} + for i := range v { + if err := validateServiceConnectService(&v[i]); err != nil { + 
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServiceConnectTlsConfiguration(v *types.ServiceConnectTlsConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServiceConnectTlsConfiguration"} + if v.IssuerCertificateAuthority == nil { + invalidParams.Add(smithy.NewErrParamRequired("IssuerCertificateAuthority")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServiceManagedEBSVolumeConfiguration(v *types.ServiceManagedEBSVolumeConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServiceManagedEBSVolumeConfiguration"} + if v.TagSpecifications != nil { + if err := validateEBSTagSpecifications(v.TagSpecifications); err != nil { + invalidParams.AddNested("TagSpecifications", err.(smithy.InvalidParamsError)) + } + } + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServiceVolumeConfiguration(v *types.ServiceVolumeConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServiceVolumeConfiguration"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.ManagedEBSVolume != nil { + if err := validateServiceManagedEBSVolumeConfiguration(v.ManagedEBSVolume); err != nil { + invalidParams.AddNested("ManagedEBSVolume", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServiceVolumeConfigurations(v []types.ServiceVolumeConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServiceVolumeConfigurations"} + for i := range v { + if err := validateServiceVolumeConfiguration(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTaskManagedEBSVolumeConfiguration(v *types.TaskManagedEBSVolumeConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TaskManagedEBSVolumeConfiguration"} + if v.TagSpecifications != nil { + if err := validateEBSTagSpecifications(v.TagSpecifications); err != nil { + invalidParams.AddNested("TagSpecifications", err.(smithy.InvalidParamsError)) + } + } + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.TerminationPolicy != nil { + if err := validateTaskManagedEBSVolumeTerminationPolicy(v.TerminationPolicy); err != nil { + invalidParams.AddNested("TerminationPolicy", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTaskManagedEBSVolumeTerminationPolicy(v *types.TaskManagedEBSVolumeTerminationPolicy) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TaskManagedEBSVolumeTerminationPolicy"} + if v.DeleteOnTermination == nil { + invalidParams.Add(smithy.NewErrParamRequired("DeleteOnTermination")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTaskOverride(v 
*types.TaskOverride) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TaskOverride"} + if v.ContainerOverrides != nil { + if err := validateContainerOverrides(v.ContainerOverrides); err != nil { + invalidParams.AddNested("ContainerOverrides", err.(smithy.InvalidParamsError)) + } + } + if v.EphemeralStorage != nil { + if err := validateEphemeralStorage(v.EphemeralStorage); err != nil { + invalidParams.AddNested("EphemeralStorage", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTaskVolumeConfiguration(v *types.TaskVolumeConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TaskVolumeConfiguration"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.ManagedEBSVolume != nil { + if err := validateTaskManagedEBSVolumeConfiguration(v.ManagedEBSVolume); err != nil { + invalidParams.AddNested("ManagedEBSVolume", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTaskVolumeConfigurations(v []types.TaskVolumeConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TaskVolumeConfigurations"} + for i := range v { + if err := validateTaskVolumeConfiguration(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTmpfs(v *types.Tmpfs) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tmpfs"} + if v.ContainerPath == nil { + invalidParams.Add(smithy.NewErrParamRequired("ContainerPath")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTmpfsList(v []types.Tmpfs) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TmpfsList"} + for i := range v { + if err := validateTmpfs(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateUlimit(v *types.Ulimit) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Ulimit"} + if len(v.Name) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateUlimitList(v []types.Ulimit) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UlimitList"} + for i := range v { + if err := validateUlimit(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateVolume(v *types.Volume) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Volume"} + if v.EfsVolumeConfiguration != nil { + if err := validateEFSVolumeConfiguration(v.EfsVolumeConfiguration); err != nil { + invalidParams.AddNested("EfsVolumeConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.FsxWindowsFileServerVolumeConfiguration != nil { + if err := 
validateFSxWindowsFileServerVolumeConfiguration(v.FsxWindowsFileServerVolumeConfiguration); err != nil { + invalidParams.AddNested("FsxWindowsFileServerVolumeConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateVolumeList(v []types.Volume) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "VolumeList"} + for i := range v { + if err := validateVolume(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateCapacityProviderInput(v *CreateCapacityProviderInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateCapacityProviderInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.AutoScalingGroupProvider == nil { + invalidParams.Add(smithy.NewErrParamRequired("AutoScalingGroupProvider")) + } else if v.AutoScalingGroupProvider != nil { + if err := validateAutoScalingGroupProvider(v.AutoScalingGroupProvider); err != nil { + invalidParams.AddNested("AutoScalingGroupProvider", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateClusterInput(v *CreateClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateClusterInput"} + if v.DefaultCapacityProviderStrategy != nil { + if err := validateCapacityProviderStrategy(v.DefaultCapacityProviderStrategy); err != nil { + invalidParams.AddNested("DefaultCapacityProviderStrategy", err.(smithy.InvalidParamsError)) + } + } + if v.ServiceConnectDefaults != nil { + if err := validateClusterServiceConnectDefaultsRequest(v.ServiceConnectDefaults); err != nil { + invalidParams.AddNested("ServiceConnectDefaults", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateServiceInput(v *CreateServiceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateServiceInput"} + if v.ServiceName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ServiceName")) + } + if v.CapacityProviderStrategy != nil { + if err := validateCapacityProviderStrategy(v.CapacityProviderStrategy); err != nil { + invalidParams.AddNested("CapacityProviderStrategy", err.(smithy.InvalidParamsError)) + } + } + if v.DeploymentConfiguration != nil { + if err := validateDeploymentConfiguration(v.DeploymentConfiguration); err != nil { + invalidParams.AddNested("DeploymentConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.NetworkConfiguration != nil { + if err := validateNetworkConfiguration(v.NetworkConfiguration); err != nil { + invalidParams.AddNested("NetworkConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.DeploymentController != nil { + if err := validateDeploymentController(v.DeploymentController); err != nil { + invalidParams.AddNested("DeploymentController", err.(smithy.InvalidParamsError)) + } + } + if v.ServiceConnectConfiguration != nil { + if err := validateServiceConnectConfiguration(v.ServiceConnectConfiguration); err != nil { + invalidParams.AddNested("ServiceConnectConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.VolumeConfigurations != nil { + 
if err := validateServiceVolumeConfigurations(v.VolumeConfigurations); err != nil { + invalidParams.AddNested("VolumeConfigurations", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateTaskSetInput(v *CreateTaskSetInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateTaskSetInput"} + if v.Service == nil { + invalidParams.Add(smithy.NewErrParamRequired("Service")) + } + if v.Cluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("Cluster")) + } + if v.TaskDefinition == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskDefinition")) + } + if v.NetworkConfiguration != nil { + if err := validateNetworkConfiguration(v.NetworkConfiguration); err != nil { + invalidParams.AddNested("NetworkConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.CapacityProviderStrategy != nil { + if err := validateCapacityProviderStrategy(v.CapacityProviderStrategy); err != nil { + invalidParams.AddNested("CapacityProviderStrategy", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteAccountSettingInput(v *DeleteAccountSettingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteAccountSettingInput"} + if len(v.Name) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteAttributesInput(v *DeleteAttributesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteAttributesInput"} + if v.Attributes == nil { + invalidParams.Add(smithy.NewErrParamRequired("Attributes")) + } else if v.Attributes != nil { + if err := validateAttributes(v.Attributes); err != nil { + invalidParams.AddNested("Attributes", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteCapacityProviderInput(v *DeleteCapacityProviderInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteCapacityProviderInput"} + if v.CapacityProvider == nil { + invalidParams.Add(smithy.NewErrParamRequired("CapacityProvider")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteClusterInput(v *DeleteClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteClusterInput"} + if v.Cluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("Cluster")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteServiceInput(v *DeleteServiceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteServiceInput"} + if v.Service == nil { + invalidParams.Add(smithy.NewErrParamRequired("Service")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteTaskDefinitionsInput(v *DeleteTaskDefinitionsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteTaskDefinitionsInput"} + if v.TaskDefinitions == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskDefinitions")) + } + if invalidParams.Len() > 0 { 
+ return invalidParams + } else { + return nil + } +} + +func validateOpDeleteTaskSetInput(v *DeleteTaskSetInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteTaskSetInput"} + if v.Cluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("Cluster")) + } + if v.Service == nil { + invalidParams.Add(smithy.NewErrParamRequired("Service")) + } + if v.TaskSet == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskSet")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeregisterContainerInstanceInput(v *DeregisterContainerInstanceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeregisterContainerInstanceInput"} + if v.ContainerInstance == nil { + invalidParams.Add(smithy.NewErrParamRequired("ContainerInstance")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeregisterTaskDefinitionInput(v *DeregisterTaskDefinitionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeregisterTaskDefinitionInput"} + if v.TaskDefinition == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskDefinition")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeContainerInstancesInput(v *DescribeContainerInstancesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeContainerInstancesInput"} + if v.ContainerInstances == nil { + invalidParams.Add(smithy.NewErrParamRequired("ContainerInstances")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeServicesInput(v *DescribeServicesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeServicesInput"} + if v.Services == nil { + invalidParams.Add(smithy.NewErrParamRequired("Services")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeTaskDefinitionInput(v *DescribeTaskDefinitionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeTaskDefinitionInput"} + if v.TaskDefinition == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskDefinition")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeTaskSetsInput(v *DescribeTaskSetsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeTaskSetsInput"} + if v.Cluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("Cluster")) + } + if v.Service == nil { + invalidParams.Add(smithy.NewErrParamRequired("Service")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeTasksInput(v *DescribeTasksInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeTasksInput"} + if v.Tasks == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tasks")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpExecuteCommandInput(v *ExecuteCommandInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ExecuteCommandInput"} + if v.Command == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("Command")) + } + if v.Task == nil { + invalidParams.Add(smithy.NewErrParamRequired("Task")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetTaskProtectionInput(v *GetTaskProtectionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetTaskProtectionInput"} + if v.Cluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("Cluster")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAttributesInput(v *ListAttributesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAttributesInput"} + if len(v.TargetType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("TargetType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListServicesByNamespaceInput(v *ListServicesByNamespaceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListServicesByNamespaceInput"} + if v.Namespace == nil { + invalidParams.Add(smithy.NewErrParamRequired("Namespace")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTagsForResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutAccountSettingDefaultInput(v *PutAccountSettingDefaultInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutAccountSettingDefaultInput"} + if len(v.Name) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutAccountSettingInput(v *PutAccountSettingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutAccountSettingInput"} + if len(v.Name) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutAttributesInput(v *PutAttributesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutAttributesInput"} + if v.Attributes == nil { + invalidParams.Add(smithy.NewErrParamRequired("Attributes")) + } else if v.Attributes != nil { + if err := validateAttributes(v.Attributes); err != nil { + invalidParams.AddNested("Attributes", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutClusterCapacityProvidersInput(v *PutClusterCapacityProvidersInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutClusterCapacityProvidersInput"} + if v.Cluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("Cluster")) + } + if v.CapacityProviders == nil { + invalidParams.Add(smithy.NewErrParamRequired("CapacityProviders")) + } + if 
v.DefaultCapacityProviderStrategy == nil { + invalidParams.Add(smithy.NewErrParamRequired("DefaultCapacityProviderStrategy")) + } else if v.DefaultCapacityProviderStrategy != nil { + if err := validateCapacityProviderStrategy(v.DefaultCapacityProviderStrategy); err != nil { + invalidParams.AddNested("DefaultCapacityProviderStrategy", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRegisterContainerInstanceInput(v *RegisterContainerInstanceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RegisterContainerInstanceInput"} + if v.Attributes != nil { + if err := validateAttributes(v.Attributes); err != nil { + invalidParams.AddNested("Attributes", err.(smithy.InvalidParamsError)) + } + } + if v.PlatformDevices != nil { + if err := validatePlatformDevices(v.PlatformDevices); err != nil { + invalidParams.AddNested("PlatformDevices", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRegisterTaskDefinitionInput(v *RegisterTaskDefinitionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RegisterTaskDefinitionInput"} + if v.Family == nil { + invalidParams.Add(smithy.NewErrParamRequired("Family")) + } + if v.ContainerDefinitions == nil { + invalidParams.Add(smithy.NewErrParamRequired("ContainerDefinitions")) + } else if v.ContainerDefinitions != nil { + if err := validateContainerDefinitions(v.ContainerDefinitions); err != nil { + invalidParams.AddNested("ContainerDefinitions", err.(smithy.InvalidParamsError)) + } + } + if v.Volumes != nil { + if err := validateVolumeList(v.Volumes); err != nil { + invalidParams.AddNested("Volumes", err.(smithy.InvalidParamsError)) + } + } + if v.ProxyConfiguration != nil { + if err := validateProxyConfiguration(v.ProxyConfiguration); err != nil { + invalidParams.AddNested("ProxyConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.InferenceAccelerators != nil { + if err := validateInferenceAccelerators(v.InferenceAccelerators); err != nil { + invalidParams.AddNested("InferenceAccelerators", err.(smithy.InvalidParamsError)) + } + } + if v.EphemeralStorage != nil { + if err := validateEphemeralStorage(v.EphemeralStorage); err != nil { + invalidParams.AddNested("EphemeralStorage", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRunTaskInput(v *RunTaskInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RunTaskInput"} + if v.CapacityProviderStrategy != nil { + if err := validateCapacityProviderStrategy(v.CapacityProviderStrategy); err != nil { + invalidParams.AddNested("CapacityProviderStrategy", err.(smithy.InvalidParamsError)) + } + } + if v.NetworkConfiguration != nil { + if err := validateNetworkConfiguration(v.NetworkConfiguration); err != nil { + invalidParams.AddNested("NetworkConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.Overrides != nil { + if err := validateTaskOverride(v.Overrides); err != nil { + invalidParams.AddNested("Overrides", err.(smithy.InvalidParamsError)) + } + } + if v.TaskDefinition == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskDefinition")) + } + if v.VolumeConfigurations != nil { + if err := validateTaskVolumeConfigurations(v.VolumeConfigurations); err != nil { + 
invalidParams.AddNested("VolumeConfigurations", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStartTaskInput(v *StartTaskInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartTaskInput"} + if v.ContainerInstances == nil { + invalidParams.Add(smithy.NewErrParamRequired("ContainerInstances")) + } + if v.NetworkConfiguration != nil { + if err := validateNetworkConfiguration(v.NetworkConfiguration); err != nil { + invalidParams.AddNested("NetworkConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.Overrides != nil { + if err := validateTaskOverride(v.Overrides); err != nil { + invalidParams.AddNested("Overrides", err.(smithy.InvalidParamsError)) + } + } + if v.TaskDefinition == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskDefinition")) + } + if v.VolumeConfigurations != nil { + if err := validateTaskVolumeConfigurations(v.VolumeConfigurations); err != nil { + invalidParams.AddNested("VolumeConfigurations", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStopTaskInput(v *StopTaskInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StopTaskInput"} + if v.Task == nil { + invalidParams.Add(smithy.NewErrParamRequired("Task")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpSubmitAttachmentStateChangesInput(v *SubmitAttachmentStateChangesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SubmitAttachmentStateChangesInput"} + if v.Attachments == nil { + invalidParams.Add(smithy.NewErrParamRequired("Attachments")) + } else if v.Attachments != nil { + if err := validateAttachmentStateChanges(v.Attachments); err != nil { + invalidParams.AddNested("Attachments", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpSubmitTaskStateChangeInput(v *SubmitTaskStateChangeInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SubmitTaskStateChangeInput"} + if v.Attachments != nil { + if err := validateAttachmentStateChanges(v.Attachments); err != nil { + invalidParams.AddNested("Attachments", err.(smithy.InvalidParamsError)) + } + } + if v.ManagedAgents != nil { + if err := validateManagedAgentStateChanges(v.ManagedAgents); err != nil { + invalidParams.AddNested("ManagedAgents", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTagResourceInput(v *TagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.TagKeys == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateCapacityProviderInput(v *UpdateCapacityProviderInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateCapacityProviderInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.AutoScalingGroupProvider == nil { + invalidParams.Add(smithy.NewErrParamRequired("AutoScalingGroupProvider")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateClusterInput(v *UpdateClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateClusterInput"} + if v.Cluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("Cluster")) + } + if v.ServiceConnectDefaults != nil { + if err := validateClusterServiceConnectDefaultsRequest(v.ServiceConnectDefaults); err != nil { + invalidParams.AddNested("ServiceConnectDefaults", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateClusterSettingsInput(v *UpdateClusterSettingsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateClusterSettingsInput"} + if v.Cluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("Cluster")) + } + if v.Settings == nil { + invalidParams.Add(smithy.NewErrParamRequired("Settings")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateContainerAgentInput(v *UpdateContainerAgentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateContainerAgentInput"} + if v.ContainerInstance == nil { + invalidParams.Add(smithy.NewErrParamRequired("ContainerInstance")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateContainerInstancesStateInput(v *UpdateContainerInstancesStateInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateContainerInstancesStateInput"} + if v.ContainerInstances == nil { + invalidParams.Add(smithy.NewErrParamRequired("ContainerInstances")) + } + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateServiceInput(v *UpdateServiceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateServiceInput"} + if v.Service == nil { + invalidParams.Add(smithy.NewErrParamRequired("Service")) + } + if v.CapacityProviderStrategy != nil { + if err := validateCapacityProviderStrategy(v.CapacityProviderStrategy); err != nil { + invalidParams.AddNested("CapacityProviderStrategy", err.(smithy.InvalidParamsError)) + } + } + if v.DeploymentConfiguration != nil { + if err := validateDeploymentConfiguration(v.DeploymentConfiguration); err != nil { + invalidParams.AddNested("DeploymentConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.NetworkConfiguration != nil { + if err := validateNetworkConfiguration(v.NetworkConfiguration); err != nil { + invalidParams.AddNested("NetworkConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.ServiceConnectConfiguration != nil { + if err := 
validateServiceConnectConfiguration(v.ServiceConnectConfiguration); err != nil { + invalidParams.AddNested("ServiceConnectConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.VolumeConfigurations != nil { + if err := validateServiceVolumeConfigurations(v.VolumeConfigurations); err != nil { + invalidParams.AddNested("VolumeConfigurations", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateServicePrimaryTaskSetInput(v *UpdateServicePrimaryTaskSetInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateServicePrimaryTaskSetInput"} + if v.Cluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("Cluster")) + } + if v.Service == nil { + invalidParams.Add(smithy.NewErrParamRequired("Service")) + } + if v.PrimaryTaskSet == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrimaryTaskSet")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateTaskProtectionInput(v *UpdateTaskProtectionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateTaskProtectionInput"} + if v.Cluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("Cluster")) + } + if v.Tasks == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tasks")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateTaskSetInput(v *UpdateTaskSetInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateTaskSetInput"} + if v.Cluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("Cluster")) + } + if v.Service == nil { + invalidParams.Add(smithy.NewErrParamRequired("Service")) + } + if v.TaskSet == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskSet")) + } + if v.Scale == nil { + invalidParams.Add(smithy.NewErrParamRequired("Scale")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +}
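
The validateOp*Input functions above are registered on each operation's Initialize middleware step by the generated addOp*ValidationMiddleware helpers, so a missing required field is rejected on the client before any request is built or signed. Below is a minimal sketch, not part of the generated diff, of how that surfaces to calling code: the region is a placeholder and no credentials are assumed to be necessary, since the call should fail locally at validation (for example, validateOpRunTaskInput requires TaskDefinition).

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/smithy-go"
)

func main() {
	// Placeholder region; the call below is expected to fail during
	// client-side validation and never reach the network.
	client := ecs.New(ecs.Options{Region: "us-east-1"})

	// TaskDefinition is intentionally omitted so that the generated
	// validateOpRunTaskInput (see validators.go above) rejects the call.
	_, err := client.RunTask(context.TODO(), &ecs.RunTaskInput{})

	// The validator returns a smithy.InvalidParamsError, which the SDK
	// wraps in an operation error; errors.As unwraps it here.
	var ipe smithy.InvalidParamsError
	if errors.As(err, &ipe) {
		fmt.Println("client-side validation failed:", ipe.Error())
	} else if err != nil {
		fmt.Println("unexpected error:", err)
	}
}

The same pattern applies to any of the operations validated above (CreateService, UpdateTaskSet, and so on): required scalar fields are checked for nil or zero length, and nested structures are validated recursively with errors aggregated under their parent context via AddNested.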