From c6cdeabcb7320cd34bd9d560596f803fec8a39df Mon Sep 17 00:00:00 2001 From: awssdkgo Date: Tue, 9 Mar 2021 19:16:48 +0000 Subject: [PATCH] Release v1.37.27 (2021-03-09) === ### Service Client Updates * `service/autoscaling`: Updates service API and documentation * EC2 Auto Scaling now supports setting a local time zone for cron expressions in scheduled actions, removing the need to adjust for Daylight Saving Time (DST) * `service/codeguruprofiler`: Updates service API and documentation * `service/elasticfilesystem`: Updates service API, documentation, and examples * AWS EFS is introducing one-zone file systems. * `service/iotwireless`: Updates service API * `service/rds`: Updates service API, documentation, and paginators * This release adds support for Amazon RDS Proxy endpoints. --- CHANGELOG.md | 13 + aws/endpoints/defaults.go | 4 +- aws/version.go | 2 +- models/apis/autoscaling/2011-01-01/api-2.json | 9 +- .../apis/autoscaling/2011-01-01/docs-2.json | 17 +- .../codeguruprofiler/2019-07-18/api-2.json | 46 +- .../codeguruprofiler/2019-07-18/docs-2.json | 11 +- .../elasticfilesystem/2015-02-01/api-2.json | 70 +- .../elasticfilesystem/2015-02-01/docs-2.json | 70 +- .../2015-02-01/examples-1.json | 5 +- models/apis/iotwireless/2020-11-22/api-2.json | 1 + models/apis/rds/2014-10-31/api-2.json | 245 +++- models/apis/rds/2014-10-31/docs-2.json | 139 ++- models/apis/rds/2014-10-31/paginators-1.json | 6 + models/endpoints/endpoints.json | 4 +- service/autoscaling/api.go | 72 +- service/codeguruprofiler/api.go | 83 +- service/codeguruprofiler/doc.go | 34 +- service/efs/api.go | 397 ++++-- service/efs/errors.go | 19 +- service/efs/examples_test.go | 10 +- service/rds/api.go | 1069 ++++++++++++++++- service/rds/errors.go | 30 +- service/rds/rdsiface/interface.go | 19 + 24 files changed, 2112 insertions(+), 263 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3742a9136a2..cafdcd4ab2c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +Release v1.37.27 
(2021-03-09) +=== + +### Service Client Updates +* `service/autoscaling`: Updates service API and documentation + * EC2 Auto Scaling now supports setting a local time zone for cron expressions in scheduled actions, removing the need to adjust for Daylight Saving Time (DST) +* `service/codeguruprofiler`: Updates service API and documentation +* `service/elasticfilesystem`: Updates service API, documentation, and examples + * AWS EFS is introducing one-zone file systems. +* `service/iotwireless`: Updates service API +* `service/rds`: Updates service API, documentation, and paginators + * This release adds support for Amazon RDS Proxy endpoints. + Release v1.37.26 (2021-03-08) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 12cfff43037..6f4c32fa052 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -367,12 +367,13 @@ var awsPartition = partition{ "amplifybackend": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -7058,6 +7059,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, diff --git a/aws/version.go b/aws/version.go index e31f79a3ad9..34f87405300 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.37.26" +const SDKVersion = "1.37.27" diff --git a/models/apis/autoscaling/2011-01-01/api-2.json b/models/apis/autoscaling/2011-01-01/api-2.json index 46fe934552a..564f45ff55a 100644 --- a/models/apis/autoscaling/2011-01-01/api-2.json +++ 
b/models/apis/autoscaling/2011-01-01/api-2.json @@ -2237,7 +2237,8 @@ "Recurrence":{"shape":"XmlStringMaxLen255"}, "MinSize":{"shape":"AutoScalingGroupMinSize"}, "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, - "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"} + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"}, + "TimeZone":{"shape":"XmlStringMaxLen255"} } }, "RecordLifecycleActionHeartbeatAnswer":{ @@ -2388,7 +2389,8 @@ "Recurrence":{"shape":"XmlStringMaxLen255"}, "MinSize":{"shape":"AutoScalingGroupMinSize"}, "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, - "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"} + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"}, + "TimeZone":{"shape":"XmlStringMaxLen255"} } }, "ScheduledUpdateGroupActionRequest":{ @@ -2401,7 +2403,8 @@ "Recurrence":{"shape":"XmlStringMaxLen255"}, "MinSize":{"shape":"AutoScalingGroupMinSize"}, "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, - "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"} + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"}, + "TimeZone":{"shape":"XmlStringMaxLen255"} } }, "ScheduledUpdateGroupActionRequests":{ diff --git a/models/apis/autoscaling/2011-01-01/docs-2.json b/models/apis/autoscaling/2011-01-01/docs-2.json index a2940f68634..46c20e0cfb7 100644 --- a/models/apis/autoscaling/2011-01-01/docs-2.json +++ b/models/apis/autoscaling/2011-01-01/docs-2.json @@ -6,7 +6,7 @@ "AttachLoadBalancerTargetGroups": "

Attaches one or more target groups to the specified Auto Scaling group.

This operation is used with the following load balancer types:

To describe the target groups for an Auto Scaling group, call the DescribeLoadBalancerTargetGroups API. To detach the target group from the Auto Scaling group, call the DetachLoadBalancerTargetGroups API.

For more information, see Elastic Load Balancing and Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

", "AttachLoadBalancers": "

To attach an Application Load Balancer, Network Load Balancer, or Gateway Load Balancer, use the AttachLoadBalancerTargetGroups API operation instead.

Attaches one or more Classic Load Balancers to the specified Auto Scaling group. Amazon EC2 Auto Scaling registers the running instances with these Classic Load Balancers.

To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers API. To detach the load balancer from the Auto Scaling group, call the DetachLoadBalancers API.

For more information, see Elastic Load Balancing and Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

", "BatchDeleteScheduledAction": "

Deletes one or more scheduled actions for the specified Auto Scaling group.

", - "BatchPutScheduledUpdateGroupAction": "

Creates or updates one or more scheduled scaling actions for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.

", + "BatchPutScheduledUpdateGroupAction": "

Creates or updates one or more scheduled scaling actions for an Auto Scaling group.

", "CancelInstanceRefresh": "

Cancels an instance refresh operation in progress. Cancellation does not roll back any replacements that have already been completed, but it prevents new replacements from being started.

For more information, see Replacing Auto Scaling Instances Based on an Instance Refresh.

", "CompleteLifecycleAction": "

Completes the lifecycle action for the specified token or instance with the specified result.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.

", "CreateAutoScalingGroup": "

We strongly recommend using a launch template when calling this operation to ensure full functionality for Amazon EC2 Auto Scaling and Amazon EC2.

Creates an Auto Scaling group with the specified name and attributes.

If you exceed your maximum limit of Auto Scaling groups, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling service quotas in the Amazon EC2 Auto Scaling User Guide.

For introductory exercises for creating an Auto Scaling group, see Getting started with Amazon EC2 Auto Scaling and Tutorial: Set up a scaled and load-balanced application in the Amazon EC2 Auto Scaling User Guide. For more information, see Auto Scaling groups in the Amazon EC2 Auto Scaling User Guide.

Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize, and MinSize). Usually, you set these sizes based on a specific number of instances. However, if you configure a mixed instances policy that defines weights for the instance types, you must specify these sizes with the same units that you use for weighting instances.

", @@ -49,7 +49,7 @@ "PutLifecycleHook": "

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using the RecordLifecycleActionHeartbeat API call.

  5. If you finish before the timeout period ends, complete the lifecycle action using the CompleteLifecycleAction API call.

For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.

You can view the lifecycle hooks for an Auto Scaling group using the DescribeLifecycleHooks API call. If you are no longer using a lifecycle hook, you can delete it by calling the DeleteLifecycleHook API.

", "PutNotificationConfiguration": "

Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.

This configuration overwrites any existing configuration.

For more information, see Getting Amazon SNS notifications when your Auto Scaling group scales in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call fails.

", "PutScalingPolicy": "

Creates or updates a scaling policy for an Auto Scaling group.

For more information about using scaling policies to scale your Auto Scaling group, see Target tracking scaling policies and Step and simple scaling policies in the Amazon EC2 Auto Scaling User Guide.

", - "PutScheduledUpdateGroupAction": "

Creates or updates a scheduled scaling action for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.

For more information, see Scheduled scaling in the Amazon EC2 Auto Scaling User Guide.

", + "PutScheduledUpdateGroupAction": "

Creates or updates a scheduled scaling action for an Auto Scaling group.

For more information, see Scheduled scaling in the Amazon EC2 Auto Scaling User Guide.

", "RecordLifecycleActionHeartbeat": "

Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using the PutLifecycleHook API call.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling lifecycle in the Amazon EC2 Auto Scaling User Guide.

", "ResumeProcesses": "

Resumes the specified suspended auto scaling processes, or all suspended process, for the specified Auto Scaling group.

For more information, see Suspending and resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.

", "SetDesiredCapacity": "

Sets the size of the specified Auto Scaling group.

If a scale-in activity occurs as a result of a new DesiredCapacity value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.

For more information, see Manual scaling in the Amazon EC2 Auto Scaling User Guide.

", @@ -1427,7 +1427,7 @@ } }, "ScheduledUpdateGroupActionRequest": { - "base": "

Describes information used for one or more scheduled scaling action updates in a BatchPutScheduledUpdateGroupAction operation.

When updating a scheduled scaling action, all optional parameters are left unchanged if not specified.

", + "base": "

Describes information used for one or more scheduled scaling action updates in a BatchPutScheduledUpdateGroupAction operation.

", "refs": { "ScheduledUpdateGroupActionRequests$member": null } @@ -1627,12 +1627,12 @@ "LaunchConfiguration$CreatedTime": "

The creation date and time for the launch configuration.

", "PutScheduledUpdateGroupActionType$Time": "

This parameter is no longer used.

", "PutScheduledUpdateGroupActionType$StartTime": "

The date and time for this action to start, in YYYY-MM-DDThh:mm:ssZ format in UTC/GMT only and in quotes (for example, \"2019-06-01T00:00:00Z\").

If you specify Recurrence and StartTime, Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.

If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error message.

", - "PutScheduledUpdateGroupActionType$EndTime": "

The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

", + "PutScheduledUpdateGroupActionType$EndTime": "

The date and time for the recurring schedule to end, in UTC.

", "ScheduledUpdateGroupAction$Time": "

This parameter is no longer used.

", "ScheduledUpdateGroupAction$StartTime": "

The date and time in UTC for this action to start. For example, \"2019-06-01T00:00:00Z\".

", "ScheduledUpdateGroupAction$EndTime": "

The date and time in UTC for the recurring schedule to end. For example, \"2019-06-01T00:00:00Z\".

", "ScheduledUpdateGroupActionRequest$StartTime": "

The date and time for the action to start, in YYYY-MM-DDThh:mm:ssZ format in UTC/GMT only and in quotes (for example, \"2019-06-01T00:00:00Z\").

If you specify Recurrence and StartTime, Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.

If you try to schedule the action in the past, Amazon EC2 Auto Scaling returns an error message.

", - "ScheduledUpdateGroupActionRequest$EndTime": "

The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

" + "ScheduledUpdateGroupActionRequest$EndTime": "

The date and time for the recurring schedule to end, in UTC.

" } }, "UpdateAutoScalingGroupType": { @@ -1840,7 +1840,8 @@ "PutScalingPolicyType$AdjustmentType": "

Specifies how the scaling adjustment is interpreted (for example, an absolute number or a percentage). The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

Required if the policy type is StepScaling or SimpleScaling. For more information, see Scaling adjustment types in the Amazon EC2 Auto Scaling User Guide.

", "PutScheduledUpdateGroupActionType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "PutScheduledUpdateGroupActionType$ScheduledActionName": "

The name of this scaling action.

", - "PutScheduledUpdateGroupActionType$Recurrence": "

The recurring schedule for this action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, \"30 0 1 1,6,12 *\"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

", + "PutScheduledUpdateGroupActionType$Recurrence": "

The recurring schedule for this action. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, \"30 0 1 1,6,12 *\"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

Cron expressions use Universal Coordinated Time (UTC) by default.

", + "PutScheduledUpdateGroupActionType$TimeZone": "

Specifies the time zone for a cron expression. If a time zone is not provided, UTC is used by default.

Valid values are the canonical names of the IANA time zones, derived from the IANA Time Zone Database (such as Etc/GMT+9 or Pacific/Tahiti). For more information, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.

", "ResourceContentionFault$message": "

", "ResourceInUseFault$message": "

", "ScalingActivityInProgressFault$message": "

", @@ -1852,8 +1853,10 @@ "ScheduledUpdateGroupAction$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "ScheduledUpdateGroupAction$ScheduledActionName": "

The name of the scheduled action.

", "ScheduledUpdateGroupAction$Recurrence": "

The recurring schedule for the action, in Unix cron syntax format.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

", + "ScheduledUpdateGroupAction$TimeZone": "

The time zone for the cron expression.

", "ScheduledUpdateGroupActionRequest$ScheduledActionName": "

The name of the scaling action.

", - "ScheduledUpdateGroupActionRequest$Recurrence": "

The recurring schedule for the action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, \"30 0 1 1,6,12 *\"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

", + "ScheduledUpdateGroupActionRequest$Recurrence": "

The recurring schedule for the action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, \"30 0 1 1,6,12 *\"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

Cron expressions use Universal Coordinated Time (UTC) by default.

", + "ScheduledUpdateGroupActionRequest$TimeZone": "

Specifies the time zone for a cron expression. If a time zone is not provided, UTC is used by default.

Valid values are the canonical names of the IANA time zones, derived from the IANA Time Zone Database (such as Etc/GMT+9 or Pacific/Tahiti). For more information, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.

", "ServiceLinkedRoleFailure$message": null, "SetDesiredCapacityType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "SetInstanceProtectionQuery$AutoScalingGroupName": "

The name of the Auto Scaling group.

", diff --git a/models/apis/codeguruprofiler/2019-07-18/api-2.json b/models/apis/codeguruprofiler/2019-07-18/api-2.json index 05ea54a7051..97640c73e5e 100644 --- a/models/apis/codeguruprofiler/2019-07-18/api-2.json +++ b/models/apis/codeguruprofiler/2019-07-18/api-2.json @@ -91,6 +91,7 @@ "output":{"shape":"DeleteProfilingGroupResponse"}, "errors":[ {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} @@ -434,11 +435,11 @@ "AgentParameterField":{ "type":"string", "enum":[ - "MaxStackDepth", - "MemoryUsageLimitPercent", - "MinimumTimeForReportingInMilliseconds", + "SamplingIntervalInMilliseconds", "ReportingIntervalInMilliseconds", - "SamplingIntervalInMilliseconds" + "MinimumTimeForReportingInMilliseconds", + "MemoryUsageLimitPercent", + "MaxStackDepth" ] }, "AgentParameters":{ @@ -458,9 +459,9 @@ "AggregationPeriod":{ "type":"string", "enum":[ - "P1D", + "PT5M", "PT1H", - "PT5M" + "P1D" ] }, "Anomalies":{ @@ -588,8 +589,8 @@ "ComputePlatform":{ "type":"string", "enum":[ - "AWSLambda", - "Default" + "Default", + "AWSLambda" ] }, "ConfigureAgentRequest":{ @@ -704,8 +705,8 @@ "FeedbackType":{ "type":"string", "enum":[ - "Negative", - "Positive" + "Positive", + "Negative" ] }, "FindingsReportId":{ @@ -759,9 +760,13 @@ "values":{"shape":"FrameMetricValues"} } }, + "FrameMetricValue":{ + "type":"double", + "box":true + }, "FrameMetricValues":{ "type":"list", - "member":{"shape":"Double"} + "member":{"shape":"FrameMetricValue"} }, "FrameMetrics":{ "type":"list", @@ -951,7 +956,8 @@ }, "error":{"httpStatusCode":500}, "exception":true, - "fault":true + "fault":true, + "retryable":{"throttling":false} }, "ListFindingsReportsRequest":{ "type":"structure", @@ -1138,15 +1144,15 @@ "MetadataField":{ "type":"string", "enum":[ + "ComputePlatform", "AgentId", "AwsRequestId", - "ComputePlatform", "ExecutionEnvironment", "LambdaFunctionArn", 
"LambdaMemoryLimitInMB", - "LambdaPreviousExecutionTimeInMilliseconds", "LambdaRemainingTimeInMilliseconds", - "LambdaTimeGapBetweenInvokesInMilliseconds" + "LambdaTimeGapBetweenInvokesInMilliseconds", + "LambdaPreviousExecutionTimeInMilliseconds" ] }, "Metric":{ @@ -1175,8 +1181,8 @@ "OrderBy":{ "type":"string", "enum":[ - "TimestampAscending", - "TimestampDescending" + "TimestampDescending", + "TimestampAscending" ] }, "PaginationToken":{ @@ -1436,7 +1442,8 @@ "httpStatusCode":402, "senderFault":true }, - "exception":true + "exception":true, + "retryable":{"throttling":false} }, "String":{"type":"string"}, "Strings":{ @@ -1521,7 +1528,8 @@ "httpStatusCode":429, "senderFault":true }, - "exception":true + "exception":true, + "retryable":{"throttling":false} }, "Timestamp":{ "type":"timestamp", diff --git a/models/apis/codeguruprofiler/2019-07-18/docs-2.json b/models/apis/codeguruprofiler/2019-07-18/docs-2.json index 3dbda5b3c1a..1825d17084f 100644 --- a/models/apis/codeguruprofiler/2019-07-18/docs-2.json +++ b/models/apis/codeguruprofiler/2019-07-18/docs-2.json @@ -1,10 +1,10 @@ { "version": "2.0", - "service": "

This section provides documentation for the Amazon CodeGuru Profiler API operations.

 <p>Amazon CodeGuru Profiler collects runtime performance data from your live applications, and provides recommendations that can help you fine-tune your application performance. Using machine learning algorithms, CodeGuru Profiler can help you find your most expensive lines of code and suggest ways you can improve efficiency and remove CPU bottlenecks. </p> <p>Amazon CodeGuru Profiler provides different visualizations of profiling data to help you identify what code is running on the CPU, see how much time is consumed, and suggest ways to reduce CPU utilization. </p> <note> <p>Amazon CodeGuru Profiler currently supports applications written in all Java virtual machine (JVM) languages. While CodeGuru Profiler supports both visualizations and recommendations for applications written in Java, it can also generate visualizations and a subset of recommendations for applications written in other JVM languages.</p> </note> <p> For more information, see <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-ug/what-is-codeguru-profiler.html">What is Amazon CodeGuru Profiler</a> in the <i>Amazon CodeGuru Profiler User Guide</i>. </p> 
", + "service": "

This section provides documentation for the Amazon CodeGuru Profiler API operations.

Amazon CodeGuru Profiler collects runtime performance data from your live applications, and provides recommendations that can help you fine-tune your application performance. Using machine learning algorithms, CodeGuru Profiler can help you find your most expensive lines of code and suggest ways you can improve efficiency and remove CPU bottlenecks.

Amazon CodeGuru Profiler provides different visualizations of profiling data to help you identify what code is running on the CPU, see how much time is consumed, and suggest ways to reduce CPU utilization.

Amazon CodeGuru Profiler currently supports applications written in all Java virtual machine (JVM) languages and Python. While CodeGuru Profiler supports both visualizations and recommendations for applications written in Java, it can also generate visualizations and a subset of recommendations for applications written in other JVM languages and Python.

For more information, see What is Amazon CodeGuru Profiler in the Amazon CodeGuru Profiler User Guide.

", "operations": { "AddNotificationChannels": "

Add up to 2 anomaly notifications channels for a profiling group.

", "BatchGetFrameMetricData": "

Returns the time series of values for a requested list of frame metrics from a time period.

", - "ConfigureAgent": "

Used by profiler agents to report their current state and to receive remote configuration updates. For example, ConfigureAgent can be used to tell and agent whether to profile or not and for how long to return profiling data.

", + "ConfigureAgent": "

Used by profiler agents to report their current state and to receive remote configuration updates. For example, ConfigureAgent can be used to tell an agent whether to profile or not and for how long to return profiling data.

", "CreateProfilingGroup": "

Creates a profiling group.

", "DeleteProfilingGroup": "

Deletes a profiling group.

", "DescribeProfilingGroup": "

Returns a ProfilingGroupDescription object that contains information about the requested profiling group.

", @@ -235,7 +235,6 @@ "Double": { "base": null, "refs": { - "FrameMetricValues$member": null, "Match$thresholdBreachValue": "

The value in the profile data that exceeded the recommendation threshold.

", "Recommendation$allMatchesSum": "

How much of the total sample count is potentially affected.

" } @@ -303,6 +302,12 @@ "FrameMetricData$member": null } }, + "FrameMetricValue": { + "base": null, + "refs": { + "FrameMetricValues$member": null + } + }, "FrameMetricValues": { "base": null, "refs": { diff --git a/models/apis/elasticfilesystem/2015-02-01/api-2.json b/models/apis/elasticfilesystem/2015-02-01/api-2.json index b30055ce755..2113daab6f7 100644 --- a/models/apis/elasticfilesystem/2015-02-01/api-2.json +++ b/models/apis/elasticfilesystem/2015-02-01/api-2.json @@ -44,7 +44,8 @@ {"shape":"FileSystemAlreadyExists"}, {"shape":"FileSystemLimitExceeded"}, {"shape":"InsufficientThroughputCapacity"}, - {"shape":"ThroughputLimitExceeded"} + {"shape":"ThroughputLimitExceeded"}, + {"shape":"UnsupportedAvailabilityZone"} ] }, "CreateMountTarget":{ @@ -68,7 +69,8 @@ {"shape":"NetworkInterfaceLimitExceeded"}, {"shape":"SecurityGroupLimitExceeded"}, {"shape":"SecurityGroupNotFound"}, - {"shape":"UnsupportedAvailabilityZone"} + {"shape":"UnsupportedAvailabilityZone"}, + {"shape":"AvailabilityZonesMismatch"} ] }, "CreateTags":{ @@ -436,7 +438,11 @@ "error":{"httpStatusCode":409}, "exception":true }, - "AccessPointArn":{"type":"string"}, + "AccessPointArn":{ + "type":"string", + "max":128, + "pattern":"^arn:aws[-a-z]*:elasticfilesystem:[0-9a-z-:]+:access-point/fsap-[0-9a-f]{8,40}$" + }, "AccessPointDescription":{ "type":"structure", "members":{ @@ -456,7 +462,11 @@ "type":"list", "member":{"shape":"AccessPointDescription"} }, - "AccessPointId":{"type":"string"}, + "AccessPointId":{ + "type":"string", + "max":128, + "pattern":"^(arn:aws[-a-z]*:elasticfilesystem:[0-9a-z-:]+:access-point/fsap-[0-9a-f]{8,40}|fsap-[0-9a-f]{8,40})$" + }, "AccessPointLimitExceeded":{ "type":"structure", "required":["ErrorCode"], @@ -478,12 +488,27 @@ "exception":true }, "AvailabilityZoneId":{"type":"string"}, - "AvailabilityZoneName":{"type":"string"}, + "AvailabilityZoneName":{ + "type":"string", + "max":64, + "min":1, + "pattern":".+" + }, + "AvailabilityZonesMismatch":{ + 
"type":"structure", + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, "AwsAccountId":{ "type":"string", "max":14, "pattern":"^(\\d{12})|(\\d{4}-\\d{4}-\\d{4})$" }, + "Backup":{"type":"boolean"}, "BackupPolicy":{ "type":"structure", "required":["Status"], @@ -511,7 +536,8 @@ "ClientToken":{ "type":"string", "max":64, - "min":1 + "min":1, + "pattern":".+" }, "CreateAccessPointRequest":{ "type":"structure", @@ -543,6 +569,8 @@ "KmsKeyId":{"shape":"KmsKeyId"}, "ThroughputMode":{"shape":"ThroughputMode"}, "ProvisionedThroughputInMibps":{"shape":"ProvisionedThroughputInMibps"}, + "AvailabilityZoneName":{"shape":"AvailabilityZoneName"}, + "Backup":{"shape":"Backup"}, "Tags":{"shape":"Tags"} } }, @@ -895,6 +923,8 @@ "KmsKeyId":{"shape":"KmsKeyId"}, "ThroughputMode":{"shape":"ThroughputMode"}, "ProvisionedThroughputInMibps":{"shape":"ProvisionedThroughputInMibps"}, + "AvailabilityZoneName":{"shape":"AvailabilityZoneName"}, + "AvailabilityZoneId":{"shape":"AvailabilityZoneId"}, "Tags":{"shape":"Tags"} } }, @@ -1044,7 +1074,8 @@ "available", "updating", "deleting", - "deleted" + "deleted", + "error" ] }, "LifecycleConfigurationDescription":{ @@ -1207,7 +1238,8 @@ "Path":{ "type":"string", "max":100, - "min":1 + "min":1, + "pattern":"^(\\/|(\\/(?!\\.)+[^$#<>;`|&?{}^*/\\n]+){1,4})$" }, "PerformanceMode":{ "type":"string", @@ -1218,9 +1250,16 @@ }, "Permissions":{ "type":"string", + "max":4, + "min":3, "pattern":"^[0-7]{3,4}$" }, - "Policy":{"type":"string"}, + "Policy":{ + "type":"string", + "max":20000, + "min":1, + "pattern":"[\\s\\S]+" + }, "PolicyNotFound":{ "type":"structure", "members":{ @@ -1292,7 +1331,11 @@ "LifecyclePolicies":{"shape":"LifecyclePolicies"} } }, - "ResourceId":{"type":"string"}, + "ResourceId":{ + "type":"string", + "max":128, + 
"pattern":"^(arn:aws[-a-z]*:elasticfilesystem:[0-9a-z-:]+:(access-point/fsap|file-system/fs)-[0-9a-f]{8,40}|fs(ap)?-[0-9a-f]{8,40})$" + }, "RootDirectory":{ "type":"structure", "members":{ @@ -1427,7 +1470,12 @@ ] }, "Timestamp":{"type":"timestamp"}, - "Token":{"type":"string"}, + "Token":{ + "type":"string", + "max":128, + "min":1, + "pattern":".+" + }, "TooManyRequests":{ "type":"structure", "required":["ErrorCode"], diff --git a/models/apis/elasticfilesystem/2015-02-01/docs-2.json b/models/apis/elasticfilesystem/2015-02-01/docs-2.json index 6c5a9a24127..aaf9be37cfa 100644 --- a/models/apis/elasticfilesystem/2015-02-01/docs-2.json +++ b/models/apis/elasticfilesystem/2015-02-01/docs-2.json @@ -2,9 +2,9 @@ "version": "2.0", "service": "Amazon Elastic File System

Amazon Elastic File System (Amazon EFS) provides simple, scalable file storage for use with Amazon EC2 instances in the AWS Cloud. With Amazon EFS, storage capacity is elastic, growing and shrinking automatically as you add and remove files, so your applications have the storage they need, when they need it. For more information, see the User Guide.

", "operations": { - "CreateAccessPoint": "

Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in its own directory and below. To learn more, see Mounting a File System Using EFS Access Points.

This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

", - "CreateFileSystem": "

Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's AWS account with the specified creation token, this operation does the following:

Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

For basic use cases, you can use a randomly generated UUID for the creation token.

The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

This operation also takes an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS: Performance Modes.

After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on an EC2 instances in your VPC by using the mount target. For more information, see Amazon EFS: How it Works.

This operation requires permissions for the elasticfilesystem:CreateFileSystem action.

", - "CreateMountTarget": "

Creates a mount target for a file system. You can then mount the file system on EC2 instances by using the mount target.

You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. For more information, see Amazon EFS: How it Works.

In the request, you also specify a file system ID for which you are creating the mount target and the file system's lifecycle state must be available. For more information, see DescribeFileSystems.

In the request, you also provide a subnet ID, which determines the following:

After creating the mount target, Amazon EFS returns a response that includes, a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system by using the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements:

If the request satisfies the requirements, Amazon EFS does the following:

The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating, you can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state.

We recommend that you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario. If the Availability Zone in which your mount target is created goes down, then you can't access your file system through that mount target.

This operation requires permissions for the following action on the file system:

This operation also requires permissions for the following Amazon EC2 actions:

", + "CreateAccessPoint": "

Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in its own directory and below. To learn more, see Mounting a file system using EFS access points.

This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

", + "CreateFileSystem": "

Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's AWS account with the specified creation token, this operation does the following:

Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

For basic use cases, you can use a randomly generated UUID for the creation token.

The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

For more information, see Creating a file system in the Amazon EFS User Guide.

The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

This operation accepts an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS performance modes.

You can set the throughput mode for the file system using the ThroughputMode parameter.

After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on EC2 instances in your VPC by using the mount target. For more information, see Amazon EFS: How it Works.

This operation requires permissions for the elasticfilesystem:CreateFileSystem action.

", + "CreateMountTarget": "

Creates a mount target for a file system. You can then mount the file system on EC2 instances by using the mount target.

You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system.

You can create only one mount target for an EFS file system using One Zone storage classes. You must create that mount target in the same Availability Zone in which the file system is located. Use the AvailabilityZoneName and AvailabilityZoneId properties in the DescribeFileSystems response object to get this information. Use the subnetId associated with the file system's Availability Zone when creating the mount target.

For more information, see Amazon EFS: How it Works.

To create a mount target for a file system, the file system's lifecycle state must be available. For more information, see DescribeFileSystems.

In the request, provide the following:

After creating the mount target, Amazon EFS returns a response that includes a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system by using the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements:

If the request satisfies the requirements, Amazon EFS does the following:

The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating, you can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state.

We recommend that you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario. If the Availability Zone in which your mount target is created goes down, then you can't access your file system through that mount target.

This operation requires permissions for the following action on the file system:

This operation also requires permissions for the following Amazon EC2 actions:

", "CreateTags": "

Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. If you add the Name tag to your file system, Amazon EFS returns it in the response to the DescribeFileSystems operation.

This operation requires permission for the elasticfilesystem:CreateTags action.

", "DeleteAccessPoint": "

Deletes the specified access point. After deletion is complete, new clients can no longer connect to the access points. Clients connected to the access point at the time of deletion will continue to function until they terminate their connection.

This operation requires permissions for the elasticfilesystem:DeleteAccessPoint action.

", "DeleteFileSystem": "

Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system.

You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. For more information, see DescribeMountTargets and DeleteMountTarget.

The DeleteFileSystem call returns while the file system state is still deleting. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass file system ID or creation token for the deleted file system, the DescribeFileSystems returns a 404 FileSystemNotFound error.

This operation requires permissions for the elasticfilesystem:DeleteFileSystem action.

", @@ -22,7 +22,7 @@ "ListTagsForResource": "

Lists all tags for a top-level EFS resource. You must provide the ID of the resource that you want to retrieve the tags for.

This operation requires permissions for the elasticfilesystem:DescribeAccessPoints action.

", "ModifyMountTargetSecurityGroups": "

Modifies the set of security groups in effect for a mount target.

When you create a mount target, Amazon EFS also creates a new network interface. For more information, see CreateMountTarget. This operation replaces the security groups in effect for the network interface associated with a mount target, with the SecurityGroups provided in the request. This operation requires that the network interface of the mount target has been created and the lifecycle state of the mount target is not deleted.

The operation requires permissions for the following actions:

", "PutBackupPolicy": "

Updates the file system's backup policy. Use this action to start or stop automatic backups of the file system.

", - "PutFileSystemPolicy": "

Applies an Amazon EFS FileSystemPolicy to an Amazon EFS file system. A file system policy is an IAM resource-based policy and can contain multiple policy statements. A file system always has exactly one file system policy, which can be the default policy or an explicit policy set or updated using this API operation. When an explicit policy is set, it overrides the default policy. For more information about the default file system policy, see Default EFS File System Policy.

This operation requires permissions for the elasticfilesystem:PutFileSystemPolicy action.

", + "PutFileSystemPolicy": "

Applies an Amazon EFS FileSystemPolicy to an Amazon EFS file system. A file system policy is an IAM resource-based policy and can contain multiple policy statements. A file system always has exactly one file system policy, which can be the default policy or an explicit policy set or updated using this API operation. EFS file system policies have a 20,000 character limit. When an explicit policy is set, it overrides the default policy. For more information about the default file system policy, see Default EFS File System Policy.

EFS file system policies have a 20,000 character limit.

This operation requires permissions for the elasticfilesystem:PutFileSystemPolicy action.

", "PutLifecycleConfiguration": "

Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. A LifecycleConfiguration applies to all files in a file system.

Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management.

In the request, specify the following:

This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation.

To apply a LifecycleConfiguration object to an encrypted file system, you need the same AWS Key Management Service (AWS KMS) permissions as when you created the encrypted file system.

", "TagResource": "

Creates a tag for an EFS resource. You can create tags for EFS file systems and access points using this API operation.

This operation requires permissions for the elasticfilesystem:TagResource action.

", "UntagResource": "

Removes tags from an EFS resource. You can remove tags from EFS file systems and access points using this API operation.

This operation requires permissions for the elasticfilesystem:UntagResource action.

", @@ -75,13 +75,21 @@ "AvailabilityZoneId": { "base": null, "refs": { - "MountTargetDescription$AvailabilityZoneId": "

The unique and consistent identifier of the Availability Zone (AZ) that the mount target resides in. For example, use1-az1 is an AZ ID for the us-east-1 Region and it has the same location in every AWS account.

" + "FileSystemDescription$AvailabilityZoneId": "

The unique and consistent identifier of the Availability Zone in which the file system's One Zone storage classes exist. For example, use1-az1 is an Availability Zone ID for the us-east-1 AWS Region, and it has the same location in every AWS account.

", + "MountTargetDescription$AvailabilityZoneId": "

The unique and consistent identifier of the Availability Zone that the mount target resides in. For example, use1-az1 is an AZ ID for the us-east-1 Region and it has the same location in every AWS account.

" } }, "AvailabilityZoneName": { "base": null, "refs": { - "MountTargetDescription$AvailabilityZoneName": "

The name of the Availability Zone (AZ) that the mount target resides in. AZs are independently mapped to names for each AWS account. For example, the Availability Zone us-east-1a for your AWS account might not be the same location as us-east-1a for another AWS account.

" + "CreateFileSystemRequest$AvailabilityZoneName": "

Used to create a file system that uses One Zone storage classes. It specifies the AWS Availability Zone in which to create the file system. Use the format us-east-1a to specify the Availability Zone. For more information about One Zone storage classes, see Using EFS storage classes in the Amazon EFS User Guide.

One Zone storage classes are not available in all Availability Zones in AWS Regions where Amazon EFS is available.

", + "FileSystemDescription$AvailabilityZoneName": "

Describes the AWS Availability Zone in which the file system is located, and is valid only for file systems using One Zone storage classes. For more information, see Using EFS storage classes in the Amazon EFS User Guide.

", + "MountTargetDescription$AvailabilityZoneName": "

The name of the Availability Zone in which the mount target is located. Availability Zones are independently mapped to names for each AWS account. For example, the Availability Zone us-east-1a for your AWS account might not be the same location as us-east-1a for another AWS account.

" + } + }, + "AvailabilityZonesMismatch": { + "base": "

Returned if the Availability Zone that was specified for a mount target is different from the Availability Zone that was specified for One Zone storage classes. For more information, see Regional and One Zone storage redundancy.

", + "refs": { } }, "AwsAccountId": { @@ -92,8 +100,14 @@ "MountTargetDescription$OwnerId": "

AWS account ID that owns the resource.

" } }, + "Backup": { + "base": null, + "refs": { + "CreateFileSystemRequest$Backup": "

Specifies whether automatic backups are enabled on the file system that you are creating. Set the value to true to enable automatic backups. If you are creating a file system that uses One Zone storage classes, automatic backups are enabled by default. For more information, see Automatic backups in the Amazon EFS User Guide.

Default is false. However, if you specify an AvailabilityZoneName, the default is true.

AWS Backup is not available in all AWS Regions where Amazon EFS is available.

" + } + }, "BackupPolicy": { - "base": "

The backup policy for the file system, showing the curent status. If ENABLED, the file system is being backed up.

", + "base": "

The backup policy for the file system used to create automatic daily backups. If status has a value of ENABLED, the file system is being automatically backed up. For more information, see Automatic backups.

", "refs": { "BackupPolicyDescription$BackupPolicy": "

Describes the file system's backup policy, indicating whether automatic backups are turned on or off..

", "PutBackupPolicyRequest$BackupPolicy": "

The backup policy included in the PutBackupPolicy request.

" @@ -143,7 +157,7 @@ } }, "CreationInfo": { - "base": "

Required if the RootDirectory > Path specified does not exist. Specifies the POSIX IDs and permissions to apply to the access point's RootDirectory > Path. If the access point root directory does not exist, EFS creates it with these settings when a client connects to the access point. When specifying CreationInfo, you must include values for all properties.

If you do not provide CreationInfo and the specified RootDirectory does not exist, attempts to mount the file system using the access point will fail.

", + "base": "

Required if the RootDirectory > Path specified does not exist. Specifies the POSIX IDs and permissions to apply to the access point's RootDirectory > Path. If the access point root directory does not exist, EFS creates it with these settings when a client connects to the access point. When specifying CreationInfo, you must include values for all properties.

Amazon EFS creates a root directory only if you have provided the CreationInfo: OwnerUid, OwnerGid, and permissions for the directory. If you do not provide this information, Amazon EFS does not create the root directory. If the root directory does not exist, attempts to mount using the access point will fail.

If you do not provide CreationInfo and the specified RootDirectory does not exist, attempts to mount the file system using the access point will fail.

", "refs": { "RootDirectory$CreationInfo": "

(Optional) Specifies the POSIX IDs and permissions to apply to the access point's RootDirectory. If the RootDirectory > Path specified does not exist, EFS creates the root directory using the CreationInfo settings when a client connects to an access point. When specifying the CreationInfo, you must provide values for all properties.

If you do not provide CreationInfo and the specified RootDirectory > Path does not exist, attempts to mount the file system using the access point will fail.

" } @@ -264,6 +278,7 @@ "AccessPointAlreadyExists$ErrorCode": null, "AccessPointLimitExceeded$ErrorCode": null, "AccessPointNotFound$ErrorCode": null, + "AvailabilityZonesMismatch$ErrorCode": null, "BadRequest$ErrorCode": null, "DependencyTimeout$ErrorCode": null, "FileSystemAlreadyExists$ErrorCode": null, @@ -296,6 +311,7 @@ "AccessPointAlreadyExists$Message": null, "AccessPointLimitExceeded$Message": null, "AccessPointNotFound$Message": null, + "AvailabilityZonesMismatch$Message": null, "BadRequest$Message": null, "DependencyTimeout$Message": null, "FileSystemAlreadyExists$Message": null, @@ -429,7 +445,7 @@ } }, "InsufficientThroughputCapacity": { - "base": "

Returned if there's not enough capacity to provision additional throughput. This value might be returned when you try to create a file system in provisioned throughput mode, when you attempt to increase the provisioned throughput of an existing file system, or when you attempt to change an existing file system from bursting to provisioned throughput mode.

", + "base": "

Returned if there's not enough capacity to provision additional throughput. This value might be returned when you try to create a file system in provisioned throughput mode, when you attempt to increase the provisioned throughput of an existing file system, or when you attempt to change an existing file system from bursting to provisioned throughput mode. Try again later.

", "refs": { } }, @@ -458,7 +474,7 @@ "KmsKeyId": { "base": null, "refs": { - "CreateFileSystemRequest$KmsKeyId": "

The ID of the AWS KMS CMK to be used to protect the encrypted file system. This parameter is only required if you want to use a nondefault CMK. If this parameter is not specified, the default CMK for Amazon EFS is used. This ID can be in one of the following formats:

If KmsKeyId is specified, the CreateFileSystemRequest$Encrypted parameter must be set to true.

EFS accepts only symmetric CMKs. You cannot use asymmetric CMKs with EFS file systems.

", + "CreateFileSystemRequest$KmsKeyId": "

The ID of the AWS KMS CMK to be used to protect the encrypted file system. This parameter is only required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for Amazon EFS is used. This ID can be in one of the following formats:

If KmsKeyId is specified, the CreateFileSystemRequest$Encrypted parameter must be set to true.

EFS accepts only symmetric CMKs. You cannot use asymmetric CMKs with EFS file systems.

", "FileSystemDescription$KmsKeyId": "

The ID of an AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the encrypted file system.

" } }, @@ -521,7 +537,7 @@ } }, "MaxResults": { - "base": null, + "base": "Max results used for pagination.", "refs": { "DescribeAccessPointsRequest$MaxResults": "

(Optional) When retrieving all access points for a file system, you can optionally specify the MaxItems parameter to limit the number of objects returned in a response. The default value is 100.

", "ListTagsForResourceRequest$MaxResults": "

(Optional) Specifies the maximum number of tag objects to return in the response. The default value is 100.

" @@ -613,7 +629,7 @@ "PerformanceMode": { "base": null, "refs": { - "CreateFileSystemRequest$PerformanceMode": "

The performance mode of the file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created.

", + "CreateFileSystemRequest$PerformanceMode": "

The performance mode of the file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created.

The maxIO mode is not supported on file systems using One Zone storage classes.

", "FileSystemDescription$PerformanceMode": "

The performance mode of the file system.

" } }, @@ -627,7 +643,7 @@ "base": null, "refs": { "FileSystemPolicyDescription$Policy": "

The JSON formatted FileSystemPolicy for the EFS file system.

", - "PutFileSystemPolicyRequest$Policy": "

The FileSystemPolicy that you're creating. Accepts a JSON formatted policy definition. To find out more about the elements that make up a file system policy, see EFS Resource-based Policies.

" + "PutFileSystemPolicyRequest$Policy": "

The FileSystemPolicy that you're creating. Accepts a JSON formatted policy definition. EFS file system policies have a 20,000 character limit. To find out more about the elements that make up a file system policy, see EFS Resource-based Policies.

" } }, "PolicyNotFound": { @@ -645,9 +661,9 @@ "ProvisionedThroughputInMibps": { "base": null, "refs": { - "CreateFileSystemRequest$ProvisionedThroughputInMibps": "

The throughput, measured in MiB/s, that you want to provision for a file system that you're creating. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. The upper limit for throughput is 1024 MiB/s. You can get this limit increased by contacting AWS Support. For more information, see Amazon EFS Limits That You Can Increase in the Amazon EFS User Guide.

", - "FileSystemDescription$ProvisionedThroughputInMibps": "

The throughput, measured in MiB/s, that you want to provision for a file system. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. The limit on throughput is 1024 MiB/s. You can get these limits increased by contacting AWS Support. For more information, see Amazon EFS Limits That You Can Increase in the Amazon EFS User Guide.

", - "UpdateFileSystemRequest$ProvisionedThroughputInMibps": "

(Optional) The amount of throughput, in MiB/s, that you want to provision for your file system. Valid values are 1-1024. Required if ThroughputMode is changed to provisioned on update. If you're not updating the amount of provisioned throughput for your file system, you don't need to provide this value in your request.

" + "CreateFileSystemRequest$ProvisionedThroughputInMibps": "

The throughput, measured in MiB/s, that you want to provision for a file system that you're creating. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. The upper limit for throughput is 1024 MiB/s. To increase this limit, contact AWS Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

", + "FileSystemDescription$ProvisionedThroughputInMibps": "

The amount of provisioned throughput, measured in MiB/s, for the file system. Valid for file systems using ThroughputMode set to provisioned.

", + "UpdateFileSystemRequest$ProvisionedThroughputInMibps": "

(Optional) Sets the amount of provisioned throughput, in MiB/s, for the file system. Valid values are 1-1024. If you are changing the throughput mode to provisioned, you must also provide the amount of provisioned throughput. Required if ThroughputMode is changed to provisioned on update.

" } }, "PutBackupPolicyRequest": { @@ -677,7 +693,7 @@ "base": "

Specifies the directory on the Amazon EFS file system that the access point provides access to. The access point exposes the specified file system path as the root directory of your file system to applications using the access point. NFS clients using the access point can only access data in the access point's RootDirectory and it's subdirectories.

", "refs": { "AccessPointDescription$RootDirectory": "

The directory on the Amazon EFS file system that the access point exposes as the root directory to NFS clients using the access point.

", - "CreateAccessPointRequest$RootDirectory": "

Specifies the directory on the Amazon EFS file system that the access point exposes as the root directory of your file system to NFS clients using the access point. The clients using the access point can only access the root directory and below. If the RootDirectory > Path specified does not exist, EFS creates it and applies the CreationInfo settings when a client connects to an access point. When specifying a RootDirectory, you need to provide the Path, and the CreationInfo is optional.

" + "CreateAccessPointRequest$RootDirectory": "

Specifies the directory on the Amazon EFS file system that the access point exposes as the root directory of your file system to NFS clients using the access point. The clients using the access point can only access the root directory and below. If the RootDirectory > Path specified does not exist, EFS creates it and applies the CreationInfo settings when a client connects to an access point. When specifying a RootDirectory, you need to provide the Path, and the CreationInfo.

Amazon EFS creates a root directory only if you have provided the CreationInfo: OwnUid, OwnGID, and permissions for the directory. If you do not provide this information, Amazon EFS does not create the root directory. If the root directory does not exist, attempts to mount using the access point will fail.

" } }, "SecondaryGids": { @@ -713,13 +729,13 @@ "Status": { "base": null, "refs": { - "BackupPolicy$Status": "

Describes the status of the file system's backup policy.

" + "BackupPolicy$Status": "

Describes the status of the file system's backup policy.

" } }, "SubnetId": { "base": null, "refs": { - "CreateMountTargetRequest$SubnetId": "

The ID of the subnet to add the mount target in.

", + "CreateMountTargetRequest$SubnetId": "

The ID of the subnet to add the mount target in. For file systems that use One Zone storage classes, use the subnet that is associated with the file system's Availability Zone.

", "MountTargetDescription$SubnetId": "

The ID of the mount target's subnet.

" } }, @@ -745,7 +761,7 @@ "base": null, "refs": { "DeleteTagsRequest$TagKeys": "

A list of tag keys to delete.

", - "UntagResourceRequest$TagKeys": "

The keys of the key:value tag pairs that you want to remove from the specified EFS resource.

" + "UntagResourceRequest$TagKeys": "

The keys of the key-value tag pairs that you want to remove from the specified EFS resource.

" } }, "TagResourceRequest": { @@ -781,9 +797,9 @@ "ThroughputMode": { "base": null, "refs": { - "CreateFileSystemRequest$ThroughputMode": "

The throughput mode for the file system to be created. There are two throughput modes to choose from for your file system: bursting and provisioned. If you set ThroughputMode to provisioned, you must also set a value for ProvisionedThroughPutInMibps. You can decrease your file system's throughput in Provisioned Throughput mode or change between the throughput modes as long as it’s been more than 24 hours since the last decrease or throughput mode change. For more, see Specifying Throughput with Provisioned Mode in the Amazon EFS User Guide.

", - "FileSystemDescription$ThroughputMode": "

The throughput mode for a file system. There are two throughput modes to choose from for your file system: bursting and provisioned. If you set ThroughputMode to provisioned, you must also set a value for ProvisionedThroughPutInMibps. You can decrease your file system's throughput in Provisioned Throughput mode or change between the throughput modes as long as it’s been more than 24 hours since the last decrease or throughput mode change.

", - "UpdateFileSystemRequest$ThroughputMode": "

(Optional) The throughput mode that you want your file system to use. If you're not updating your throughput mode, you don't need to provide this value in your request. If you are changing the ThroughputMode to provisioned, you must also set a value for ProvisionedThroughputInMibps.

" + "CreateFileSystemRequest$ThroughputMode": "

Specifies the throughput mode for the file system, either bursting or provisioned. If you set ThroughputMode to provisioned, you must also set a value for ProvisionedThroughputInMibps. After you create the file system, you can decrease your file system's throughput in Provisioned Throughput mode or change between the throughput modes, as long as it’s been more than 24 hours since the last decrease or throughput mode change. For more information, see Specifying throughput with provisioned mode in the Amazon EFS User Guide.

Default is bursting.

", + "FileSystemDescription$ThroughputMode": "

Displays the file system's throughput mode. For more information, see Throughput modes in the Amazon EFS User Guide.

", + "UpdateFileSystemRequest$ThroughputMode": "

(Optional) Updates the file system's throughput mode. If you're not updating your throughput mode, you don't need to provide this value in your request. If you are changing the ThroughputMode to provisioned, you must also set a value for ProvisionedThroughputInMibps.

" } }, "Timestamp": { @@ -794,7 +810,7 @@ } }, "Token": { - "base": null, + "base": "Token used for pagination.", "refs": { "DescribeAccessPointsRequest$NextToken": "

NextToken is present if the response is paginated. You can use NextToken in the subsequent request to fetch the next page of access point descriptions.

", "DescribeAccessPointsResponse$NextToken": "

Present if there are more access points than returned in the response. You can use the NextToken in the subsequent request to fetch the additional descriptions.

", @@ -820,7 +836,7 @@ } }, "UnsupportedAvailabilityZone": { - "base": "

", + "base": "

Returned if the requested Amazon EFS functionality is not available in the specified Availability Zone.

", "refs": { } }, @@ -835,14 +851,14 @@ } }, "ValidationException": { - "base": "

Returned if the AWS Backup service is not available in the region that the request was made.

", + "base": "

Returned if the AWS Backup service is not available in the Region in which the request was made.

", "refs": { } }, "VpcId": { "base": null, "refs": { - "MountTargetDescription$VpcId": "

The Virtual Private Cloud (VPC) ID that the mount target is configured in.

" + "MountTargetDescription$VpcId": "

The virtual private cloud (VPC) ID that the mount target is configured in.

" } } } diff --git a/models/apis/elasticfilesystem/2015-02-01/examples-1.json b/models/apis/elasticfilesystem/2015-02-01/examples-1.json index b5c988cac35..f3c75b34344 100644 --- a/models/apis/elasticfilesystem/2015-02-01/examples-1.json +++ b/models/apis/elasticfilesystem/2015-02-01/examples-1.json @@ -4,7 +4,9 @@ "CreateFileSystem": [ { "input": { + "Backup": true, "CreationToken": "tokenstring", + "Encrypted": true, "PerformanceMode": "generalPurpose", "Tags": [ { @@ -16,6 +18,7 @@ "output": { "CreationTime": "1481841524.0", "CreationToken": "tokenstring", + "Encrypted": true, "FileSystemId": "fs-01234567", "LifeCycleState": "creating", "NumberOfMountTargets": 0, @@ -37,7 +40,7 @@ "output": { } }, - "description": "This operation creates a new file system with the default generalpurpose performance mode.", + "description": "This operation creates a new, encrypted file system with automatic backups enabled, and the default generalpurpose performance mode.", "id": "to-create-a-new-file-system-1481840798547", "title": "To create a new file system" } diff --git a/models/apis/iotwireless/2020-11-22/api-2.json b/models/apis/iotwireless/2020-11-22/api-2.json index 21a135a4d82..d831e252596 100644 --- a/models/apis/iotwireless/2020-11-22/api-2.json +++ b/models/apis/iotwireless/2020-11-22/api-2.json @@ -2338,6 +2338,7 @@ }, "Seq":{ "type":"integer", + "max":16383, "min":0 }, "ServiceProfile":{ diff --git a/models/apis/rds/2014-10-31/api-2.json b/models/apis/rds/2014-10-31/api-2.json index 6ffb8db88ce..dc0bddf1c12 100644 --- a/models/apis/rds/2014-10-31/api-2.json +++ b/models/apis/rds/2014-10-31/api-2.json @@ -432,6 +432,25 @@ {"shape":"DBProxyQuotaExceededFault"} ] }, + "CreateDBProxyEndpoint":{ + "name":"CreateDBProxyEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBProxyEndpointRequest"}, + "output":{ + "shape":"CreateDBProxyEndpointResponse", + "resultWrapper":"CreateDBProxyEndpointResult" + }, + "errors":[ + 
{"shape":"InvalidSubnet"}, + {"shape":"DBProxyNotFoundFault"}, + {"shape":"DBProxyEndpointAlreadyExistsFault"}, + {"shape":"DBProxyEndpointQuotaExceededFault"}, + {"shape":"InvalidDBProxyStateFault"} + ] + }, "CreateDBSecurityGroup":{ "name":"CreateDBSecurityGroup", "http":{ @@ -685,6 +704,22 @@ {"shape":"InvalidDBProxyStateFault"} ] }, + "DeleteDBProxyEndpoint":{ + "name":"DeleteDBProxyEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBProxyEndpointRequest"}, + "output":{ + "shape":"DeleteDBProxyEndpointResponse", + "resultWrapper":"DeleteDBProxyEndpointResult" + }, + "errors":[ + {"shape":"DBProxyEndpointNotFoundFault"}, + {"shape":"InvalidDBProxyEndpointStateFault"} + ] + }, "DeleteDBSecurityGroup":{ "name":"DeleteDBSecurityGroup", "http":{ @@ -1053,6 +1088,22 @@ {"shape":"DBProxyNotFoundFault"} ] }, + "DescribeDBProxyEndpoints":{ + "name":"DescribeDBProxyEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBProxyEndpointsRequest"}, + "output":{ + "shape":"DescribeDBProxyEndpointsResponse", + "resultWrapper":"DescribeDBProxyEndpointsResult" + }, + "errors":[ + {"shape":"DBProxyNotFoundFault"}, + {"shape":"DBProxyEndpointNotFoundFault"} + ] + }, "DescribeDBProxyTargetGroups":{ "name":"DescribeDBProxyTargetGroups", "http":{ @@ -1630,6 +1681,24 @@ {"shape":"InvalidDBProxyStateFault"} ] }, + "ModifyDBProxyEndpoint":{ + "name":"ModifyDBProxyEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBProxyEndpointRequest"}, + "output":{ + "shape":"ModifyDBProxyEndpointResponse", + "resultWrapper":"ModifyDBProxyEndpointResult" + }, + "errors":[ + {"shape":"DBProxyEndpointNotFoundFault"}, + {"shape":"DBProxyEndpointAlreadyExistsFault"}, + {"shape":"InvalidDBProxyEndpointStateFault"}, + {"shape":"InvalidDBProxyStateFault"} + ] + }, "ModifyDBProxyTargetGroup":{ "name":"ModifyDBProxyTargetGroup", "http":{ @@ -3025,6 +3094,28 @@ 
"DBParameterGroup":{"shape":"DBParameterGroup"} } }, + "CreateDBProxyEndpointRequest":{ + "type":"structure", + "required":[ + "DBProxyName", + "DBProxyEndpointName", + "VpcSubnetIds" + ], + "members":{ + "DBProxyName":{"shape":"DBProxyName"}, + "DBProxyEndpointName":{"shape":"DBProxyEndpointName"}, + "VpcSubnetIds":{"shape":"StringList"}, + "VpcSecurityGroupIds":{"shape":"StringList"}, + "TargetRole":{"shape":"DBProxyEndpointTargetRole"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBProxyEndpointResponse":{ + "type":"structure", + "members":{ + "DBProxyEndpoint":{"shape":"DBProxyEndpoint"} + } + }, "CreateDBProxyRequest":{ "type":"structure", "required":[ @@ -4054,6 +4145,7 @@ "DBProxyArn":{"shape":"String"}, "Status":{"shape":"DBProxyStatus"}, "EngineFamily":{"shape":"String"}, + "VpcId":{"shape":"String"}, "VpcSecurityGroupIds":{"shape":"StringList"}, "VpcSubnetIds":{"shape":"StringList"}, "Auth":{"shape":"UserAuthConfigInfoList"}, @@ -4071,16 +4163,99 @@ "members":{ }, "error":{ - "code":"DBProxyTargetExistsFault", + "code":"DBProxyAlreadyExistsFault", "httpStatusCode":400, "senderFault":true }, "exception":true }, + "DBProxyEndpoint":{ + "type":"structure", + "members":{ + "DBProxyEndpointName":{"shape":"String"}, + "DBProxyEndpointArn":{"shape":"String"}, + "DBProxyName":{"shape":"String"}, + "Status":{"shape":"DBProxyEndpointStatus"}, + "VpcId":{"shape":"String"}, + "VpcSecurityGroupIds":{"shape":"StringList"}, + "VpcSubnetIds":{"shape":"StringList"}, + "Endpoint":{"shape":"String"}, + "CreatedDate":{"shape":"TStamp"}, + "TargetRole":{"shape":"DBProxyEndpointTargetRole"}, + "IsDefault":{"shape":"Boolean"} + } + }, + "DBProxyEndpointAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBProxyEndpointAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBProxyEndpointList":{ + "type":"list", + "member":{"shape":"DBProxyEndpoint"} + }, + "DBProxyEndpointName":{ + "type":"string", + 
"max":63, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*" + }, + "DBProxyEndpointNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBProxyEndpointNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBProxyEndpointQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBProxyEndpointQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBProxyEndpointStatus":{ + "type":"string", + "enum":[ + "available", + "modifying", + "incompatible-network", + "insufficient-resource-limits", + "creating", + "deleting" + ] + }, + "DBProxyEndpointTargetRole":{ + "type":"string", + "enum":[ + "READ_WRITE", + "READ_ONLY" + ] + }, "DBProxyList":{ "type":"list", "member":{"shape":"DBProxy"} }, + "DBProxyName":{ + "type":"string", + "max":63, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*" + }, "DBProxyNotFoundFault":{ "type":"structure", "members":{ @@ -4126,6 +4301,7 @@ "RdsResourceId":{"shape":"String"}, "Port":{"shape":"Integer"}, "Type":{"shape":"TargetType"}, + "Role":{"shape":"TargetRole"}, "TargetHealth":{"shape":"TargetHealth"} } }, @@ -4555,6 +4731,19 @@ "DBParameterGroupName":{"shape":"String"} } }, + "DeleteDBProxyEndpointRequest":{ + "type":"structure", + "required":["DBProxyEndpointName"], + "members":{ + "DBProxyEndpointName":{"shape":"DBProxyEndpointName"} + } + }, + "DeleteDBProxyEndpointResponse":{ + "type":"structure", + "members":{ + "DBProxyEndpoint":{"shape":"DBProxyEndpoint"} + } + }, "DeleteDBProxyRequest":{ "type":"structure", "required":["DBProxyName"], @@ -4856,6 +5045,23 @@ "Marker":{"shape":"String"} } }, + "DescribeDBProxyEndpointsRequest":{ + "type":"structure", + "members":{ + "DBProxyName":{"shape":"DBProxyName"}, + "DBProxyEndpointName":{"shape":"DBProxyEndpointName"}, + "Filters":{"shape":"FilterList"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + 
"DescribeDBProxyEndpointsResponse":{ + "type":"structure", + "members":{ + "DBProxyEndpoints":{"shape":"DBProxyEndpointList"}, + "Marker":{"shape":"String"} + } + }, "DescribeDBProxyTargetGroupsRequest":{ "type":"structure", "required":["DBProxyName"], @@ -5796,6 +6002,17 @@ }, "exception":true }, + "InvalidDBProxyEndpointStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBProxyEndpointStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidDBProxyStateFault":{ "type":"structure", "members":{ @@ -6187,6 +6404,21 @@ "Parameters":{"shape":"ParametersList"} } }, + "ModifyDBProxyEndpointRequest":{ + "type":"structure", + "required":["DBProxyEndpointName"], + "members":{ + "DBProxyEndpointName":{"shape":"DBProxyEndpointName"}, + "NewDBProxyEndpointName":{"shape":"DBProxyEndpointName"}, + "VpcSecurityGroupIds":{"shape":"StringList"} + } + }, + "ModifyDBProxyEndpointResponse":{ + "type":"structure", + "members":{ + "DBProxyEndpoint":{"shape":"DBProxyEndpoint"} + } + }, "ModifyDBProxyRequest":{ "type":"structure", "required":["DBProxyName"], @@ -7799,13 +8031,22 @@ "UNREACHABLE", "CONNECTION_FAILED", "AUTH_FAILURE", - "PENDING_PROXY_CAPACITY" + "PENDING_PROXY_CAPACITY", + "INVALID_REPLICATION_STATE" ] }, "TargetList":{ "type":"list", "member":{"shape":"DBProxyTarget"} }, + "TargetRole":{ + "type":"string", + "enum":[ + "READ_WRITE", + "READ_ONLY", + "UNKNOWN" + ] + }, "TargetState":{ "type":"string", "enum":[ diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index d968eb45958..c8c905afc07 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -24,6 +24,7 @@ "CreateDBInstanceReadReplica": "

Creates a new DB instance that acts as a read replica for an existing source DB instance. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read Replicas in the Amazon RDS User Guide.

Amazon Aurora doesn't support this action. Call the CreateDBInstance action to create a DB instance for an Aurora DB cluster.

All read replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified.

Your source DB instance must have backup retention enabled.

", "CreateDBParameterGroup": "

Creates a new DB parameter group.

A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect.

After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

", "CreateDBProxy": "

Creates a new DB proxy.

", + "CreateDBProxyEndpoint": "

Creates a DBProxyEndpoint. Only applies to proxies that are associated with Aurora DB clusters. You can use DB proxy endpoints to specify read/write or read-only access to the DB cluster. You can also use DB proxy endpoints to access a DB proxy through a different VPC than the proxy's default VPC.

", "CreateDBSecurityGroup": "

Creates a new DB security group. DB security groups control access to a DB instance.

A DB security group controls access to EC2-Classic DB instances that are not in a VPC.

", "CreateDBSnapshot": "

Creates a snapshot of a DB instance. The source DB instance must be in the available or storage-optimization state.

", "CreateDBSubnetGroup": "

Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.

", @@ -38,7 +39,8 @@ "DeleteDBInstance": "

The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.

If you request a final DB snapshot the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.

When a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when you skip creation of the final snapshot with the SkipFinalSnapshot parameter.

If the specified DB instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of the following conditions are true:

To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no longer a read replica. After the promotion completes, then call the DeleteDBInstance API action to delete the final instance in the DB cluster.

", "DeleteDBInstanceAutomatedBackup": "

Deletes automated backups using the DbiResourceId value of the source DB instance or the Amazon Resource Name (ARN) of the automated backups.

", "DeleteDBParameterGroup": "

Deletes a specified DB parameter group. The DB parameter group to be deleted can't be associated with any DB instances.

", - "DeleteDBProxy": "

Deletes an existing proxy.

", + "DeleteDBProxy": "

Deletes an existing DB proxy.

", + "DeleteDBProxyEndpoint": "

Deletes a DBProxyEndpoint. Doing so removes the ability to access the DB proxy using the endpoint that you defined. The endpoint that you delete might have provided capabilities such as read/write or read-only operations, or using a different VPC than the DB proxy's default VPC.

", "DeleteDBSecurityGroup": "

Deletes a DB security group.

The specified DB security group must not be associated with any DB instances.

", "DeleteDBSnapshot": "

Deletes a DB snapshot. If the snapshot is being copied, the copy operation is terminated.

The DB snapshot must be in the available state to be deleted.

", "DeleteDBSubnetGroup": "

Deletes a DB subnet group.

The specified database subnet group must not be associated with any DB instances.

", @@ -64,6 +66,7 @@ "DescribeDBParameterGroups": "

Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DB parameter group.

", "DescribeDBParameters": "

Returns the detailed parameter list for a particular DB parameter group.

", "DescribeDBProxies": "

Returns information about DB proxies.

", + "DescribeDBProxyEndpoints": "

Returns information about DB proxy endpoints.

", "DescribeDBProxyTargetGroups": "

Returns information about DB proxy target groups, represented by DBProxyTargetGroup data structures.

", "DescribeDBProxyTargets": "

Returns information about DBProxyTarget objects. This API supports pagination.

", "DescribeDBSecurityGroups": "

Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the descriptions of the specified DB security group.

", @@ -100,6 +103,7 @@ "ModifyDBInstance": "

Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. To learn what modifications you can make to your DB instance, call DescribeValidDBInstanceModifications before you call ModifyDBInstance.

", "ModifyDBParameterGroup": "

Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB instance associated with the parameter group before the change can take effect.

After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

", "ModifyDBProxy": "

Changes the settings for an existing DB proxy.

", + "ModifyDBProxyEndpoint": "

Changes the settings for an existing DB proxy endpoint.

", "ModifyDBProxyTargetGroup": "

Modifies the properties of a DBProxyTargetGroup.

", "ModifyDBSnapshot": "

Updates a manual DB snapshot with a new engine version. The snapshot can be encrypted or unencrypted, but not shared or public.

Amazon RDS supports upgrading DB snapshots for MySQL, Oracle, and PostgreSQL.

", "ModifyDBSnapshotAttribute": "

Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.

To share a manual DB snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB snapshot. Uses the value all to make the manual DB snapshot public, which means it can be copied or restored by all AWS accounts.

Don't add the all value for any manual DB snapshots that contain private information that you don't want available to all AWS accounts.

If the manual DB snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

To view which AWS accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as values for the restore attribute.

", @@ -331,6 +335,7 @@ "DBInstanceStatusInfo$Normal": "

Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.

", "DBProxy$RequireTLS": "

Indicates whether Transport Layer Security (TLS) encryption is required for connections to the proxy.

", "DBProxy$DebugLogging": "

Whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs.

", + "DBProxyEndpoint$IsDefault": "

A value that indicates whether this endpoint is the default endpoint for the associated DB proxy. Default DB proxy endpoints always have read/write capability. Other endpoints that you associate with the DB proxy can be either read/write or read-only.

", "DBProxyTargetGroup$IsDefault": "

Whether this target group is the first one used for connection requests by the associated proxy. Because each proxy is currently associated with a single target group, currently this setting is always true.

", "DBSnapshot$Encrypted": "

Specifies whether the DB snapshot is encrypted.

", "DBSnapshot$IAMDatabaseAuthenticationEnabled": "

True if mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

", @@ -695,6 +700,16 @@ "refs": { } }, + "CreateDBProxyEndpointRequest": { + "base": null, + "refs": { + } + }, + "CreateDBProxyEndpointResponse": { + "base": null, + "refs": { + } + }, "CreateDBProxyRequest": { "base": null, "refs": { @@ -1252,12 +1267,72 @@ "refs": { } }, + "DBProxyEndpoint": { + "base": "

The data structure representing an endpoint associated with a DB proxy. RDS automatically creates one endpoint for each DB proxy. For Aurora DB clusters, you can associate additional endpoints with the same DB proxy. These endpoints can be read/write or read-only. They can also reside in different VPCs than the associated DB proxy.

This data type is used as a response element in the DescribeDBProxyEndpoints operation.

", + "refs": { + "CreateDBProxyEndpointResponse$DBProxyEndpoint": "

The DBProxyEndpoint object that is created by the API operation. The DB proxy endpoint that you create might provide capabilities such as read/write or read-only operations, or using a different VPC than the proxy's default VPC.

", + "DBProxyEndpointList$member": null, + "DeleteDBProxyEndpointResponse$DBProxyEndpoint": "

The data structure representing the details of the DB proxy endpoint that you delete.

", + "ModifyDBProxyEndpointResponse$DBProxyEndpoint": "

The DBProxyEndpoint object representing the new settings for the DB proxy endpoint.

" + } + }, + "DBProxyEndpointAlreadyExistsFault": { + "base": "

The specified DB proxy endpoint name must be unique for all DB proxy endpoints owned by your AWS account in the specified AWS Region.

", + "refs": { + } + }, + "DBProxyEndpointList": { + "base": null, + "refs": { + "DescribeDBProxyEndpointsResponse$DBProxyEndpoints": "

The list of DBProxyEndpoint objects returned by the API operation.

" + } + }, + "DBProxyEndpointName": { + "base": null, + "refs": { + "CreateDBProxyEndpointRequest$DBProxyEndpointName": "

The name of the DB proxy endpoint to create.

", + "DeleteDBProxyEndpointRequest$DBProxyEndpointName": "

The name of the DB proxy endpoint to delete.

", + "DescribeDBProxyEndpointsRequest$DBProxyEndpointName": "

The name of a DB proxy endpoint to describe. If you omit this parameter, the output includes information about all DB proxy endpoints associated with the specified proxy.

", + "ModifyDBProxyEndpointRequest$DBProxyEndpointName": "

The name of the DB proxy endpoint that you want to modify.

", + "ModifyDBProxyEndpointRequest$NewDBProxyEndpointName": "

The new identifier for the DBProxyEndpoint. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.

" + } + }, + "DBProxyEndpointNotFoundFault": { + "base": "

The DB proxy endpoint doesn't exist.

", + "refs": { + } + }, + "DBProxyEndpointQuotaExceededFault": { + "base": "

The DB proxy already has the maximum number of endpoints.

", + "refs": { + } + }, + "DBProxyEndpointStatus": { + "base": null, + "refs": { + "DBProxyEndpoint$Status": "

The current status of this DB proxy endpoint. A status of available means the endpoint is ready to handle requests. Other values indicate that you must wait for the endpoint to be ready, or take some action to resolve an issue.

" + } + }, + "DBProxyEndpointTargetRole": { + "base": null, + "refs": { + "CreateDBProxyEndpointRequest$TargetRole": "

A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations. The default is READ_WRITE.

", + "DBProxyEndpoint$TargetRole": "

A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.

" + } + }, "DBProxyList": { "base": null, "refs": { "DescribeDBProxiesResponse$DBProxies": "

A return value representing an arbitrary number of DBProxy data structures.

" } }, + "DBProxyName": { + "base": null, + "refs": { + "CreateDBProxyEndpointRequest$DBProxyName": "

The name of the DB proxy associated with the DB proxy endpoint that you create.

", + "DescribeDBProxyEndpointsRequest$DBProxyName": "

The name of the DB proxy whose endpoints you want to describe. If you omit this parameter, the output includes information about all DB proxy endpoints associated with all your DB proxies.

" + } + }, "DBProxyNotFoundFault": { "base": "

The specified proxy name doesn't correspond to a proxy owned by your AWS account in the specified AWS Region.

", "refs": { @@ -1534,6 +1609,16 @@ "refs": { } }, + "DeleteDBProxyEndpointRequest": { + "base": null, + "refs": { + } + }, + "DeleteDBProxyEndpointResponse": { + "base": null, + "refs": { + } + }, "DeleteDBProxyRequest": { "base": null, "refs": { @@ -1716,6 +1801,16 @@ "refs": { } }, + "DescribeDBProxyEndpointsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDBProxyEndpointsResponse": { + "base": null, + "refs": { + } + }, "DescribeDBProxyTargetGroupsRequest": { "base": null, "refs": { @@ -2117,6 +2212,7 @@ "DescribeDBParameterGroupsMessage$Filters": "

This parameter isn't currently supported.

", "DescribeDBParametersMessage$Filters": "

This parameter isn't currently supported.

", "DescribeDBProxiesRequest$Filters": "

This parameter is not currently supported.

", + "DescribeDBProxyEndpointsRequest$Filters": "

This parameter is not currently supported.

", "DescribeDBProxyTargetGroupsRequest$Filters": "

This parameter is not currently supported.

", "DescribeDBProxyTargetsRequest$Filters": "

This parameter is not currently supported.

", "DescribeDBSecurityGroupsMessage$Filters": "

This parameter isn't currently supported.

", @@ -2479,6 +2575,11 @@ "refs": { } }, + "InvalidDBProxyEndpointStateFault": { + "base": "

You can't perform this operation while the DB proxy endpoint is in a particular state.

", + "refs": { + } + }, "InvalidDBProxyStateFault": { "base": "

The requested operation can't be performed while the proxy is in this state.

", "refs": { @@ -2623,6 +2724,7 @@ "base": null, "refs": { "DescribeDBProxiesRequest$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", + "DescribeDBProxyEndpointsRequest$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeDBProxyTargetGroupsRequest$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeDBProxyTargetsRequest$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeExportTasksMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified value, a pagination token called a marker is included in the response. You can use the marker in a later DescribeExportTasks request to retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

" @@ -2700,6 +2802,16 @@ "refs": { } }, + "ModifyDBProxyEndpointRequest": { + "base": null, + "refs": { + } + }, + "ModifyDBProxyEndpointResponse": { + "base": null, + "refs": { + } + }, "ModifyDBProxyRequest": { "base": null, "refs": { @@ -3801,8 +3913,14 @@ "DBProxy$DBProxyName": "

The identifier for the proxy. This name must be unique for all proxies owned by your AWS account in the specified AWS Region.

", "DBProxy$DBProxyArn": "

The Amazon Resource Name (ARN) for the proxy.

", "DBProxy$EngineFamily": "

The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.

", + "DBProxy$VpcId": "

Provides the VPC ID of the DB proxy.

", "DBProxy$RoleArn": "

The Amazon Resource Name (ARN) for the IAM role that the proxy uses to access Amazon Secrets Manager.

", - "DBProxy$Endpoint": "

The endpoint that you can use to connect to the proxy. You include the endpoint value in the connection string for a database client application.

", + "DBProxy$Endpoint": "

The endpoint that you can use to connect to the DB proxy. You include the endpoint value in the connection string for a database client application.

", + "DBProxyEndpoint$DBProxyEndpointName": "

The name for the DB proxy endpoint. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.

", + "DBProxyEndpoint$DBProxyEndpointArn": "

The Amazon Resource Name (ARN) for the DB proxy endpoint.

", + "DBProxyEndpoint$DBProxyName": "

The identifier for the DB proxy that is associated with this DB proxy endpoint.

", + "DBProxyEndpoint$VpcId": "

Provides the VPC ID of the DB proxy endpoint.

", + "DBProxyEndpoint$Endpoint": "

The endpoint that you can use to connect to the DB proxy. You include the endpoint value in the connection string for a database client application.

", "DBProxyTarget$TargetArn": "

The Amazon Resource Name (ARN) for the RDS DB instance or Aurora DB cluster.

", "DBProxyTarget$Endpoint": "

The writer endpoint for the RDS DB instance or Aurora DB cluster.

", "DBProxyTarget$TrackedClusterId": "

The DB cluster identifier when the target represents an Aurora DB cluster. This field is blank when the target represents an RDS DB instance.

", @@ -3911,9 +4029,11 @@ "DescribeDBParametersMessage$DBParameterGroupName": "

The name of a specific DB parameter group to return details for.

Constraints:

", "DescribeDBParametersMessage$Source": "

The parameter types to return.

Default: All parameter types returned

Valid Values: user | system | engine-default

", "DescribeDBParametersMessage$Marker": "

An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", - "DescribeDBProxiesRequest$DBProxyName": "

The name of the DB proxy.

", + "DescribeDBProxiesRequest$DBProxyName": "

The name of the DB proxy. If you omit this parameter, the output includes information about all DB proxies owned by your AWS account ID.

", "DescribeDBProxiesRequest$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeDBProxiesResponse$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", + "DescribeDBProxyEndpointsRequest$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", + "DescribeDBProxyEndpointsResponse$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeDBProxyTargetGroupsRequest$DBProxyName": "

The identifier of the DBProxy associated with the target group.

", "DescribeDBProxyTargetGroupsRequest$TargetGroupName": "

The identifier of the DBProxyTargetGroup to describe.

", "DescribeDBProxyTargetGroupsRequest$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", @@ -4392,6 +4512,8 @@ "ConnectionPoolConfigurationInfo$SessionPinningFilters": "

Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. Including an item in the list exempts that class of SQL operations from the pinning behavior. Currently, the only allowed value is EXCLUDE_VARIABLE_SETS.

", "CreateDBClusterEndpointMessage$StaticMembers": "

List of DB instance identifiers that are part of the custom endpoint group.

", "CreateDBClusterEndpointMessage$ExcludedMembers": "

List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty.

", + "CreateDBProxyEndpointRequest$VpcSubnetIds": "

The VPC subnet IDs for the DB proxy endpoint that you create. You can specify a different set of subnet IDs than for the original DB proxy.

", + "CreateDBProxyEndpointRequest$VpcSecurityGroupIds": "

The VPC security group IDs for the DB proxy endpoint that you create. You can specify a different set of security group IDs than for the original DB proxy. The default is the default security group for the VPC.

", "CreateDBProxyRequest$VpcSubnetIds": "

One or more VPC subnet IDs to associate with the new proxy.

", "CreateDBProxyRequest$VpcSecurityGroupIds": "

One or more VPC security group IDs to associate with the new proxy.

", "DBCluster$CustomEndpoints": "

Identifies all custom endpoints associated with the cluster.

", @@ -4399,11 +4521,14 @@ "DBClusterEndpoint$ExcludedMembers": "

List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty.

", "DBProxy$VpcSecurityGroupIds": "

Provides a list of VPC security groups that the proxy belongs to.

", "DBProxy$VpcSubnetIds": "

The EC2 subnet IDs for the proxy.

", + "DBProxyEndpoint$VpcSecurityGroupIds": "

Provides a list of VPC security groups that the DB proxy endpoint belongs to.

", + "DBProxyEndpoint$VpcSubnetIds": "

The EC2 subnet IDs for the DB proxy endpoint.

", "DeregisterDBProxyTargetsRequest$DBInstanceIdentifiers": "

One or more DB instance identifiers.

", "DeregisterDBProxyTargetsRequest$DBClusterIdentifiers": "

One or more DB cluster identifiers.

", "ExportTask$ExportOnly": "

The data exported from the snapshot. Valid values are the following:

", "ModifyDBClusterEndpointMessage$StaticMembers": "

List of DB instance identifiers that are part of the custom endpoint group.

", "ModifyDBClusterEndpointMessage$ExcludedMembers": "

List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty.

", + "ModifyDBProxyEndpointRequest$VpcSecurityGroupIds": "

The VPC security group IDs for the DB proxy endpoint. When the DB proxy endpoint uses a different VPC than the original proxy, you also specify a different set of security group IDs than for the original proxy.

", "ModifyDBProxyRequest$SecurityGroups": "

The new list of security groups for the DBProxy.

", "RegisterDBProxyTargetsRequest$DBInstanceIdentifiers": "

One or more DB instance identifiers.

", "RegisterDBProxyTargetsRequest$DBClusterIdentifiers": "

One or more DB cluster identifiers.

", @@ -4489,6 +4614,7 @@ "DBInstanceAutomatedBackup$InstanceCreateTime": "

Provides the date and time that the DB instance was created.

", "DBProxy$CreatedDate": "

The date and time when the proxy was first created.

", "DBProxy$UpdatedDate": "

The date and time when the proxy was last updated.

", + "DBProxyEndpoint$CreatedDate": "

The date and time when the DB proxy endpoint was first created.

", "DBProxyTargetGroup$CreatedDate": "

The date and time when the target group was first created.

", "DBProxyTargetGroup$UpdatedDate": "

The date and time when the target group was last updated.

", "DBSnapshot$SnapshotCreateTime": "

Specifies when the snapshot was taken in Coordinated Universal Time (UTC).

", @@ -4531,6 +4657,7 @@ "CreateDBInstanceMessage$Tags": "

Tags to assign to the DB instance.

", "CreateDBInstanceReadReplicaMessage$Tags": null, "CreateDBParameterGroupMessage$Tags": "

Tags to assign to the DB parameter group.

", + "CreateDBProxyEndpointRequest$Tags": null, "CreateDBProxyRequest$Tags": "

An optional set of key-value pairs to associate arbitrary data of your choosing with the proxy.

", "CreateDBSecurityGroupMessage$Tags": "

Tags to assign to the DB security group.

", "CreateDBSnapshotMessage$Tags": null, @@ -4581,6 +4708,12 @@ "RegisterDBProxyTargetsResponse$DBProxyTargets": "

One or more DBProxyTarget objects that are created when you register targets with a target group.

" } }, + "TargetRole": { + "base": null, + "refs": { + "DBProxyTarget$Role": "

A value that indicates whether the target of the proxy can be used for read/write or read-only operations.

" + } + }, "TargetState": { "base": null, "refs": { diff --git a/models/apis/rds/2014-10-31/paginators-1.json b/models/apis/rds/2014-10-31/paginators-1.json index e4a45469e4c..4f8e576c85e 100644 --- a/models/apis/rds/2014-10-31/paginators-1.json +++ b/models/apis/rds/2014-10-31/paginators-1.json @@ -90,6 +90,12 @@ "output_token": "Marker", "result_key": "DBProxies" }, + "DescribeDBProxyEndpoints": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBProxyEndpoints" + }, "DescribeDBProxyTargetGroups": { "input_token": "Marker", "limit_key": "MaxRecords", diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 01adfd5e487..0a1b8a5fc4d 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -262,12 +262,13 @@ }, "amplifybackend" : { "endpoints" : { + "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, - "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "us-east-1" : { }, @@ -6820,6 +6821,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, diff --git a/service/autoscaling/api.go b/service/autoscaling/api.go index 470c1e2d181..ccb607ddb17 100644 --- a/service/autoscaling/api.go +++ b/service/autoscaling/api.go @@ -436,8 +436,7 @@ func (c *AutoScaling) BatchPutScheduledUpdateGroupActionRequest(input *BatchPutS // BatchPutScheduledUpdateGroupAction API operation for Auto Scaling. // // Creates or updates one or more scheduled scaling actions for an Auto Scaling -// group. If you leave a parameter unspecified when updating a scheduled scaling -// action, the corresponding value remains unchanged. +// group. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4757,8 +4756,6 @@ func (c *AutoScaling) PutScheduledUpdateGroupActionRequest(input *PutScheduledUp // PutScheduledUpdateGroupAction API operation for Auto Scaling. // // Creates or updates a scheduled scaling action for an Auto Scaling group. -// If you leave a parameter unspecified when updating a scheduled scaling action, -// the corresponding value remains unchanged. // // For more information, see Scheduled scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/schedule_time.html) // in the Amazon EC2 Auto Scaling User Guide. @@ -13027,8 +13024,7 @@ type PutScheduledUpdateGroupActionInput struct { // scale beyond this capacity if you add more scaling conditions. DesiredCapacity *int64 `type:"integer"` - // The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling - // does not perform the action after this time. + // The date and time for the recurring schedule to end, in UTC. EndTime *time.Time `type:"timestamp"` // The maximum size of the Auto Scaling group. @@ -13037,14 +13033,15 @@ type PutScheduledUpdateGroupActionInput struct { // The minimum size of the Auto Scaling group. MinSize *int64 `type:"integer"` - // The recurring schedule for this action, in Unix cron syntax format. This - // format consists of five fields separated by white spaces: [Minute] [Hour] - // [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes - // (for example, "30 0 1 1,6,12 *"). For more information about this format, - // see Crontab (http://crontab.org). + // The recurring schedule for this action. This format consists of five fields + // separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] + // [Day_of_Week]. The value must be in quotes (for example, "30 0 1 1,6,12 *"). + // For more information about this format, see Crontab (http://crontab.org). 
// // When StartTime and EndTime are specified with Recurrence, they form the boundaries // of when the recurring action starts and stops. + // + // Cron expressions use Universal Coordinated Time (UTC) by default. Recurrence *string `min:"1" type:"string"` // The name of this scaling action. @@ -13065,6 +13062,15 @@ type PutScheduledUpdateGroupActionInput struct { // This parameter is no longer used. Time *time.Time `type:"timestamp"` + + // Specifies the time zone for a cron expression. If a time zone is not provided, + // UTC is used by default. + // + // Valid values are the canonical names of the IANA time zones, derived from + // the IANA Time Zone Database (such as Etc/GMT+9 or Pacific/Tahiti). For more + // information, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + // (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). + TimeZone *string `min:"1" type:"string"` } // String returns the string representation @@ -13095,6 +13101,9 @@ func (s *PutScheduledUpdateGroupActionInput) Validate() error { if s.ScheduledActionName != nil && len(*s.ScheduledActionName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ScheduledActionName", 1)) } + if s.TimeZone != nil && len(*s.TimeZone) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TimeZone", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -13156,6 +13165,12 @@ func (s *PutScheduledUpdateGroupActionInput) SetTime(v time.Time) *PutScheduledU return s } +// SetTimeZone sets the TimeZone field's value. +func (s *PutScheduledUpdateGroupActionInput) SetTimeZone(v string) *PutScheduledUpdateGroupActionInput { + s.TimeZone = &v + return s +} + type PutScheduledUpdateGroupActionOutput struct { _ struct{} `type:"structure"` } @@ -13596,6 +13611,9 @@ type ScheduledUpdateGroupAction struct { // This parameter is no longer used. Time *time.Time `type:"timestamp"` + + // The time zone for the cron expression. 
+ TimeZone *string `min:"1" type:"string"` } // String returns the string representation @@ -13668,11 +13686,14 @@ func (s *ScheduledUpdateGroupAction) SetTime(v time.Time) *ScheduledUpdateGroupA return s } +// SetTimeZone sets the TimeZone field's value. +func (s *ScheduledUpdateGroupAction) SetTimeZone(v string) *ScheduledUpdateGroupAction { + s.TimeZone = &v + return s +} + // Describes information used for one or more scheduled scaling action updates // in a BatchPutScheduledUpdateGroupAction operation. -// -// When updating a scheduled scaling action, all optional parameters are left -// unchanged if not specified. type ScheduledUpdateGroupActionRequest struct { _ struct{} `type:"structure"` @@ -13680,8 +13701,7 @@ type ScheduledUpdateGroupActionRequest struct { // the scheduled action runs and the capacity it attempts to maintain. DesiredCapacity *int64 `type:"integer"` - // The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling - // does not perform the action after this time. + // The date and time for the recurring schedule to end, in UTC. EndTime *time.Time `type:"timestamp"` // The maximum size of the Auto Scaling group. @@ -13697,6 +13717,8 @@ type ScheduledUpdateGroupActionRequest struct { // // When StartTime and EndTime are specified with Recurrence, they form the boundaries // of when the recurring action starts and stops. + // + // Cron expressions use Universal Coordinated Time (UTC) by default. Recurrence *string `min:"1" type:"string"` // The name of the scaling action. @@ -13714,6 +13736,15 @@ type ScheduledUpdateGroupActionRequest struct { // If you try to schedule the action in the past, Amazon EC2 Auto Scaling returns // an error message. StartTime *time.Time `type:"timestamp"` + + // Specifies the time zone for a cron expression. If a time zone is not provided, + // UTC is used by default. 
+ // + // Valid values are the canonical names of the IANA time zones, derived from + // the IANA Time Zone Database (such as Etc/GMT+9 or Pacific/Tahiti). For more + // information, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + // (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). + TimeZone *string `min:"1" type:"string"` } // String returns the string representation @@ -13738,6 +13769,9 @@ func (s *ScheduledUpdateGroupActionRequest) Validate() error { if s.ScheduledActionName != nil && len(*s.ScheduledActionName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ScheduledActionName", 1)) } + if s.TimeZone != nil && len(*s.TimeZone) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TimeZone", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -13787,6 +13821,12 @@ func (s *ScheduledUpdateGroupActionRequest) SetStartTime(v time.Time) *Scheduled return s } +// SetTimeZone sets the TimeZone field's value. +func (s *ScheduledUpdateGroupActionRequest) SetTimeZone(v string) *ScheduledUpdateGroupActionRequest { + s.TimeZone = &v + return s +} + type SetDesiredCapacityInput struct { _ struct{} `type:"structure"` diff --git a/service/codeguruprofiler/api.go b/service/codeguruprofiler/api.go index 4f28e66fe0a..a8f4297232f 100644 --- a/service/codeguruprofiler/api.go +++ b/service/codeguruprofiler/api.go @@ -245,7 +245,7 @@ func (c *CodeGuruProfiler) ConfigureAgentRequest(input *ConfigureAgentInput) (re // ConfigureAgent API operation for Amazon CodeGuru Profiler. // // Used by profiler agents to report their current state and to receive remote -// configuration updates. For example, ConfigureAgent can be used to tell and +// configuration updates. For example, ConfigureAgent can be used to tell an // agent whether to profile or not and for how long to return profiling data. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -443,6 +443,11 @@ func (c *CodeGuruProfiler) DeleteProfilingGroupRequest(input *DeleteProfilingGro // * InternalServerException // The server encountered an internal error and is unable to complete the request. // +// * ConflictException +// The requested operation would cause a conflict with the current state of +// a service resource associated with the request. Resolve the conflict before +// retrying this request. +// // * ValidationException // The parameter is not valid. // @@ -6325,66 +6330,66 @@ func ActionGroup_Values() []string { } const ( - // AgentParameterFieldMaxStackDepth is a AgentParameterField enum value - AgentParameterFieldMaxStackDepth = "MaxStackDepth" + // AgentParameterFieldSamplingIntervalInMilliseconds is a AgentParameterField enum value + AgentParameterFieldSamplingIntervalInMilliseconds = "SamplingIntervalInMilliseconds" - // AgentParameterFieldMemoryUsageLimitPercent is a AgentParameterField enum value - AgentParameterFieldMemoryUsageLimitPercent = "MemoryUsageLimitPercent" + // AgentParameterFieldReportingIntervalInMilliseconds is a AgentParameterField enum value + AgentParameterFieldReportingIntervalInMilliseconds = "ReportingIntervalInMilliseconds" // AgentParameterFieldMinimumTimeForReportingInMilliseconds is a AgentParameterField enum value AgentParameterFieldMinimumTimeForReportingInMilliseconds = "MinimumTimeForReportingInMilliseconds" - // AgentParameterFieldReportingIntervalInMilliseconds is a AgentParameterField enum value - AgentParameterFieldReportingIntervalInMilliseconds = "ReportingIntervalInMilliseconds" + // AgentParameterFieldMemoryUsageLimitPercent is a AgentParameterField enum value + AgentParameterFieldMemoryUsageLimitPercent = "MemoryUsageLimitPercent" - // AgentParameterFieldSamplingIntervalInMilliseconds is a AgentParameterField enum value - AgentParameterFieldSamplingIntervalInMilliseconds = "SamplingIntervalInMilliseconds" + // AgentParameterFieldMaxStackDepth is a 
AgentParameterField enum value + AgentParameterFieldMaxStackDepth = "MaxStackDepth" ) // AgentParameterField_Values returns all elements of the AgentParameterField enum func AgentParameterField_Values() []string { return []string{ - AgentParameterFieldMaxStackDepth, - AgentParameterFieldMemoryUsageLimitPercent, - AgentParameterFieldMinimumTimeForReportingInMilliseconds, - AgentParameterFieldReportingIntervalInMilliseconds, AgentParameterFieldSamplingIntervalInMilliseconds, + AgentParameterFieldReportingIntervalInMilliseconds, + AgentParameterFieldMinimumTimeForReportingInMilliseconds, + AgentParameterFieldMemoryUsageLimitPercent, + AgentParameterFieldMaxStackDepth, } } const ( - // AggregationPeriodP1d is a AggregationPeriod enum value - AggregationPeriodP1d = "P1D" + // AggregationPeriodPt5m is a AggregationPeriod enum value + AggregationPeriodPt5m = "PT5M" // AggregationPeriodPt1h is a AggregationPeriod enum value AggregationPeriodPt1h = "PT1H" - // AggregationPeriodPt5m is a AggregationPeriod enum value - AggregationPeriodPt5m = "PT5M" + // AggregationPeriodP1d is a AggregationPeriod enum value + AggregationPeriodP1d = "P1D" ) // AggregationPeriod_Values returns all elements of the AggregationPeriod enum func AggregationPeriod_Values() []string { return []string{ - AggregationPeriodP1d, - AggregationPeriodPt1h, AggregationPeriodPt5m, + AggregationPeriodPt1h, + AggregationPeriodP1d, } } const ( - // ComputePlatformAwslambda is a ComputePlatform enum value - ComputePlatformAwslambda = "AWSLambda" - // ComputePlatformDefault is a ComputePlatform enum value ComputePlatformDefault = "Default" + + // ComputePlatformAwslambda is a ComputePlatform enum value + ComputePlatformAwslambda = "AWSLambda" ) // ComputePlatform_Values returns all elements of the ComputePlatform enum func ComputePlatform_Values() []string { return []string{ - ComputePlatformAwslambda, ComputePlatformDefault, + ComputePlatformAwslambda, } } @@ -6401,31 +6406,31 @@ func EventPublisher_Values() 
[]string { } const ( - // FeedbackTypeNegative is a FeedbackType enum value - FeedbackTypeNegative = "Negative" - // FeedbackTypePositive is a FeedbackType enum value FeedbackTypePositive = "Positive" + + // FeedbackTypeNegative is a FeedbackType enum value + FeedbackTypeNegative = "Negative" ) // FeedbackType_Values returns all elements of the FeedbackType enum func FeedbackType_Values() []string { return []string{ - FeedbackTypeNegative, FeedbackTypePositive, + FeedbackTypeNegative, } } const ( + // MetadataFieldComputePlatform is a MetadataField enum value + MetadataFieldComputePlatform = "ComputePlatform" + // MetadataFieldAgentId is a MetadataField enum value MetadataFieldAgentId = "AgentId" // MetadataFieldAwsRequestId is a MetadataField enum value MetadataFieldAwsRequestId = "AwsRequestId" - // MetadataFieldComputePlatform is a MetadataField enum value - MetadataFieldComputePlatform = "ComputePlatform" - // MetadataFieldExecutionEnvironment is a MetadataField enum value MetadataFieldExecutionEnvironment = "ExecutionEnvironment" @@ -6435,28 +6440,28 @@ const ( // MetadataFieldLambdaMemoryLimitInMb is a MetadataField enum value MetadataFieldLambdaMemoryLimitInMb = "LambdaMemoryLimitInMB" - // MetadataFieldLambdaPreviousExecutionTimeInMilliseconds is a MetadataField enum value - MetadataFieldLambdaPreviousExecutionTimeInMilliseconds = "LambdaPreviousExecutionTimeInMilliseconds" - // MetadataFieldLambdaRemainingTimeInMilliseconds is a MetadataField enum value MetadataFieldLambdaRemainingTimeInMilliseconds = "LambdaRemainingTimeInMilliseconds" // MetadataFieldLambdaTimeGapBetweenInvokesInMilliseconds is a MetadataField enum value MetadataFieldLambdaTimeGapBetweenInvokesInMilliseconds = "LambdaTimeGapBetweenInvokesInMilliseconds" + + // MetadataFieldLambdaPreviousExecutionTimeInMilliseconds is a MetadataField enum value + MetadataFieldLambdaPreviousExecutionTimeInMilliseconds = "LambdaPreviousExecutionTimeInMilliseconds" ) // MetadataField_Values returns all 
elements of the MetadataField enum func MetadataField_Values() []string { return []string{ + MetadataFieldComputePlatform, MetadataFieldAgentId, MetadataFieldAwsRequestId, - MetadataFieldComputePlatform, MetadataFieldExecutionEnvironment, MetadataFieldLambdaFunctionArn, MetadataFieldLambdaMemoryLimitInMb, - MetadataFieldLambdaPreviousExecutionTimeInMilliseconds, MetadataFieldLambdaRemainingTimeInMilliseconds, MetadataFieldLambdaTimeGapBetweenInvokesInMilliseconds, + MetadataFieldLambdaPreviousExecutionTimeInMilliseconds, } } @@ -6473,17 +6478,17 @@ func MetricType_Values() []string { } const ( - // OrderByTimestampAscending is a OrderBy enum value - OrderByTimestampAscending = "TimestampAscending" - // OrderByTimestampDescending is a OrderBy enum value OrderByTimestampDescending = "TimestampDescending" + + // OrderByTimestampAscending is a OrderBy enum value + OrderByTimestampAscending = "TimestampAscending" ) // OrderBy_Values returns all elements of the OrderBy enum func OrderBy_Values() []string { return []string{ - OrderByTimestampAscending, OrderByTimestampDescending, + OrderByTimestampAscending, } } diff --git a/service/codeguruprofiler/doc.go b/service/codeguruprofiler/doc.go index 8cf5ac960a2..394b6d19bc6 100644 --- a/service/codeguruprofiler/doc.go +++ b/service/codeguruprofiler/doc.go @@ -6,22 +6,24 @@ // This section provides documentation for the Amazon CodeGuru Profiler API // operations. // -//

Amazon CodeGuru Profiler collects runtime performance data from your -// live applications, and provides recommendations that can help you fine-tune -// your application performance. Using machine learning algorithms, CodeGuru -// Profiler can help you find your most expensive lines of code and suggest -// ways you can improve efficiency and remove CPU bottlenecks.

Amazon -// CodeGuru Profiler provides different visualizations of profiling data -// to help you identify what code is running on the CPU, see how much time -// is consumed, and suggest ways to reduce CPU utilization.

Amazon -// CodeGuru Profiler currently supports applications written in all Java -// virtual machine (JVM) languages. While CodeGuru Profiler supports both -// visualizations and recommendations for applications written in Java, it -// can also generate visualizations and a subset of recommendations for applications -// written in other JVM languages.

For more information, -// see What -// is Amazon CodeGuru Profiler in the Amazon CodeGuru Profiler User -// Guide.

+// Amazon CodeGuru Profiler collects runtime performance data from your live +// applications, and provides recommendations that can help you fine-tune your +// application performance. Using machine learning algorithms, CodeGuru Profiler +// can help you find your most expensive lines of code and suggest ways you +// can improve efficiency and remove CPU bottlenecks. +// +// Amazon CodeGuru Profiler provides different visualizations of profiling data +// to help you identify what code is running on the CPU, see how much time is +// consumed, and suggest ways to reduce CPU utilization. +// +// Amazon CodeGuru Profiler currently supports applications written in all Java +// virtual machine (JVM) languages and Python. While CodeGuru Profiler supports +// both visualizations and recommendations for applications written in Java, +// it can also generate visualizations and a subset of recommendations for applications +// written in other JVM languages and Python. +// +// For more information, see What is Amazon CodeGuru Profiler (https://docs.aws.amazon.com/codeguru/latest/profiler-ug/what-is-codeguru-profiler.html) +// in the Amazon CodeGuru Profiler User Guide. // // See https://docs.aws.amazon.com/goto/WebAPI/codeguruprofiler-2019-07-18 for more information on this service. // diff --git a/service/efs/api.go b/service/efs/api.go index 32d25cdd2d3..4df7c02ac6f 100644 --- a/service/efs/api.go +++ b/service/efs/api.go @@ -63,8 +63,8 @@ func (c *EFS) CreateAccessPointRequest(input *CreateAccessPointInput) (req *requ // point. The operating system user and group override any identity information // provided by the NFS client. The file system path is exposed as the access // point's root directory. Applications using the access point can only access -// data in its own directory and below. To learn more, see Mounting a File System -// Using EFS Access Points (https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html). +// data in its own directory and below. 
To learn more, see Mounting a file system +// using EFS access points (https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html). // // This operation requires permissions for the elasticfilesystem:CreateAccessPoint // action. @@ -190,18 +190,24 @@ func (c *EFS) CreateFileSystemRequest(input *CreateFileSystemInput) (req *reques // if the initial call had succeeded in creating a file system, the client can // learn of its existence from the FileSystemAlreadyExists error. // +// For more information, see Creating a file system (https://docs.aws.amazon.com/efs/latest/ug/creating-using-create-fs.html#creating-using-create-fs-part1) +// in the Amazon EFS User Guide. +// // The CreateFileSystem call returns while the file system's lifecycle state // is still creating. You can check the file system creation status by calling // the DescribeFileSystems operation, which among other things returns the file // system state. // -// This operation also takes an optional PerformanceMode parameter that you -// choose for your file system. We recommend generalPurpose performance mode -// for most file systems. File systems using the maxIO performance mode can -// scale to higher levels of aggregate throughput and operations per second -// with a tradeoff of slightly higher latencies for most file operations. The -// performance mode can't be changed after the file system has been created. -// For more information, see Amazon EFS: Performance Modes (https://docs.aws.amazon.com/efs/latest/ug/performance.html#performancemodes.html). +// This operation accepts an optional PerformanceMode parameter that you choose +// for your file system. We recommend generalPurpose performance mode for most +// file systems. File systems using the maxIO performance mode can scale to +// higher levels of aggregate throughput and operations per second with a tradeoff +// of slightly higher latencies for most file operations. 
The performance mode +// can't be changed after the file system has been created. For more information, +// see Amazon EFS performance modes (https://docs.aws.amazon.com/efs/latest/ug/performance.html#performancemodes.html). +// +// You can set the throughput mode for the file system using the ThroughputMode +// parameter. // // After the file system is fully created, Amazon EFS sets its lifecycle state // to available, at which point you can create one or more mount targets for @@ -241,12 +247,16 @@ func (c *EFS) CreateFileSystemRequest(input *CreateFileSystemInput) (req *reques // This value might be returned when you try to create a file system in provisioned // throughput mode, when you attempt to increase the provisioned throughput // of an existing file system, or when you attempt to change an existing file -// system from bursting to provisioned throughput mode. +// system from bursting to provisioned throughput mode. Try again later. // // * ThroughputLimitExceeded // Returned if the throughput mode or amount of provisioned throughput can't // be changed because the throughput limit of 1024 MiB/s has been reached. // +// * UnsupportedAvailabilityZone +// Returned if the requested Amazon EFS functionality is not available in the +// specified Availability Zone. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/CreateFileSystem func (c *EFS) CreateFileSystem(input *CreateFileSystemInput) (*FileSystemDescription, error) { req, out := c.CreateFileSystemRequest(input) @@ -321,20 +331,29 @@ func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *requ // target for a given file system. If you have multiple subnets in an Availability // Zone, you create a mount target in one of the subnets. EC2 instances do not // need to be in the same subnet as the mount target in order to access their -// file system. 
For more information, see Amazon EFS: How it Works (https://docs.aws.amazon.com/efs/latest/ug/how-it-works.html). +// file system. +// +// You can create only one mount target for an EFS file system using One Zone +// storage classes. You must create that mount target in the same Availability +// Zone in which the file system is located. Use the AvailabilityZoneName and +// AvailabiltyZoneId properties in the DescribeFileSystems response object to +// get this information. Use the subnetId associated with the file system's +// Availability Zone when creating the mount target. // -// In the request, you also specify a file system ID for which you are creating -// the mount target and the file system's lifecycle state must be available. -// For more information, see DescribeFileSystems. +// For more information, see Amazon EFS: How it Works (https://docs.aws.amazon.com/efs/latest/ug/how-it-works.html). // -// In the request, you also provide a subnet ID, which determines the following: +// To create a mount target for a file system, the file system's lifecycle state +// must be available. For more information, see DescribeFileSystems. // -// * VPC in which Amazon EFS creates the mount target +// In the request, provide the following: // -// * Availability Zone in which Amazon EFS creates the mount target +// * The file system ID for which you are creating the mount target. // -// * IP address range from which Amazon EFS selects the IP address of the -// mount target (if you don't specify an IP address in the request) +// * A subnet ID, which determines the following: The VPC in which Amazon +// EFS creates the mount target The Availability Zone in which Amazon EFS +// creates the mount target The IP address range from which Amazon EFS selects +// the IP address of the mount target (if you don't specify an IP address +// in the request) // // After creating the mount target, Amazon EFS returns a response that includes, // a MountTargetId and an IpAddress. 
You use this IP address when mounting the @@ -457,6 +476,14 @@ func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *requ // VPC. // // * UnsupportedAvailabilityZone +// Returned if the requested Amazon EFS functionality is not available in the +// specified Availability Zone. +// +// * AvailabilityZonesMismatch +// Returned if the Availability Zone that was specified for a mount target is +// different from the Availability Zone that was specified for One Zone storage +// classes. For more information, see Regional and One Zone storage redundancy +// (https://docs.aws.amazon.com/efs/latest/ug/availability-durability.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/CreateMountTarget func (c *EFS) CreateMountTarget(input *CreateMountTargetInput) (*MountTargetDescription, error) { @@ -1321,8 +1348,8 @@ func (c *EFS) DescribeBackupPolicyRequest(input *DescribeBackupPolicyInput) (req // system specified. // // * ValidationException -// Returned if the AWS Backup service is not available in the region that the -// request was made. +// Returned if the AWS Backup service is not available in the Region in which +// the request was made. // // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/DescribeBackupPolicy func (c *EFS) DescribeBackupPolicy(input *DescribeBackupPolicyInput) (*DescribeBackupPolicyOutput, error) { @@ -2399,8 +2426,8 @@ func (c *EFS) PutBackupPolicyRequest(input *PutBackupPolicyInput) (req *request. // Returned if an error occurred on the server side. // // * ValidationException -// Returned if the AWS Backup service is not available in the region that the -// request was made. +// Returned if the AWS Backup service is not available in the Region in which +// the request was made. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/PutBackupPolicy func (c *EFS) PutBackupPolicy(input *PutBackupPolicyInput) (*PutBackupPolicyOutput, error) { @@ -2472,9 +2499,12 @@ func (c *EFS) PutFileSystemPolicyRequest(input *PutFileSystemPolicyInput) (req * // system policy is an IAM resource-based policy and can contain multiple policy // statements. A file system always has exactly one file system policy, which // can be the default policy or an explicit policy set or updated using this -// API operation. When an explicit policy is set, it overrides the default policy. -// For more information about the default file system policy, see Default EFS -// File System Policy (https://docs.aws.amazon.com/efs/latest/ug/iam-access-control-nfs-efs.html#default-filesystempolicy). +// API operation. EFS file system policies have a 20,000 character limit. When +// an explicit policy is set, it overrides the default policy. For more information +// about the default file system policy, see Default EFS File System Policy +// (https://docs.aws.amazon.com/efs/latest/ug/iam-access-control-nfs-efs.html#default-filesystempolicy). +// +// EFS file system policies have a 20,000 character limit. // // This operation requires permissions for the elasticfilesystem:PutFileSystemPolicy // action. @@ -2904,7 +2934,7 @@ func (c *EFS) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // This value might be returned when you try to create a file system in provisioned // throughput mode, when you attempt to increase the provisioned throughput // of an existing file system, or when you attempt to change an existing file -// system from bursting to provisioned throughput mode. +// system from bursting to provisioned throughput mode. Try again later. // // * InternalServerError // Returned if an error occurred on the server side. 
@@ -3230,20 +3260,82 @@ func (s *AccessPointNotFound) RequestID() string { return s.RespMetadata.RequestID } -// The backup policy for the file system, showing the curent status. If ENABLED, -// the file system is being backed up. +// Returned if the Availability Zone that was specified for a mount target is +// different from the Availability Zone that was specified for One Zone storage +// classes. For more information, see Regional and One Zone storage redundancy +// (https://docs.aws.amazon.com/efs/latest/ug/availability-durability.html). +type AvailabilityZonesMismatch struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + ErrorCode *string `min:"1" type:"string"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AvailabilityZonesMismatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZonesMismatch) GoString() string { + return s.String() +} + +func newErrorAvailabilityZonesMismatch(v protocol.ResponseMetadata) error { + return &AvailabilityZonesMismatch{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AvailabilityZonesMismatch) Code() string { + return "AvailabilityZonesMismatch" +} + +// Message returns the exception's message. +func (s *AvailabilityZonesMismatch) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AvailabilityZonesMismatch) OrigErr() error { + return nil +} + +func (s *AvailabilityZonesMismatch) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *AvailabilityZonesMismatch) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AvailabilityZonesMismatch) RequestID() string { + return s.RespMetadata.RequestID +} + +// The backup policy for the file system used to create automatic daily backups. +// If status has a value of ENABLED, the file system is being automatically +// backed up. For more information, see Automatic backups (https://docs.aws.amazon.com/efs/latest/ug/awsbackup.html#automatic-backups). type BackupPolicy struct { _ struct{} `type:"structure"` // Describes the status of the file system's backup policy. // - // * ENABLED - EFS is automatically backing up the file system. + // * ENABLED - EFS is automatically backing up the file system.> // // * ENABLING - EFS is turning on automatic backups for the file system. // // * DISABLED - automatic back ups are turned off for the file system. // - // * DISABLED - EFS is turning off automatic backups for the file system. + // * DISABLING - EFS is turning off automatic backups for the file system. // // Status is a required field Status *string `type:"string" required:"true" enum:"Status"` @@ -3360,7 +3452,12 @@ type CreateAccessPointInput struct { // directory and below. If the RootDirectory > Path specified does not exist, // EFS creates it and applies the CreationInfo settings when a client connects // to an access point. When specifying a RootDirectory, you need to provide - // the Path, and the CreationInfo is optional. + // the Path, and the CreationInfo. + // + // Amazon EFS creates a root directory only if you have provided the CreationInfo: + // OwnUid, OwnGID, and permissions for the directory. If you do not provide + // this information, Amazon EFS does not create the root directory. If the root + // directory does not exist, attempts to mount using the access point will fail. 
RootDirectory *RootDirectory `type:"structure"` // Creates tags associated with the access point. Each tag is a key-value pair. @@ -3554,6 +3651,28 @@ func (s *CreateAccessPointOutput) SetTags(v []*Tag) *CreateAccessPointOutput { type CreateFileSystemInput struct { _ struct{} `type:"structure"` + // Used to create a file system that uses One Zone storage classes. It specifies + // the AWS Availability Zone in which to create the file system. Use the format + // us-east-1a to specify the Availability Zone. For more information about One + // Zone storage classes, see Using EFS storage classes (https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) + // in the Amazon EFS User Guide. + // + // One Zone storage classes are not available in all Availability Zones in AWS + // Regions where Amazon EFS is available. + AvailabilityZoneName *string `min:"1" type:"string"` + + // Specifies whether automatic backups are enabled on the file system that you + // are creating. Set the value to true to enable automatic backups. If you are + // creating a file system that uses One Zone storage classes, automatic backups + // are enabled by default. For more information, see Automatic backups (https://docs.aws.amazon.com/efs/latest/ug/awsbackup.html#automatic-backups) + // in the Amazon EFS User Guide. + // + // Default is false. However, if you specify an AvailabilityZoneName, the default + // is true. + // + // AWS Backup is not available in all AWS Regions where Amazon EFS is available. + Backup *bool `type:"boolean"` + // A string of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent // creation. CreationToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -3566,9 +3685,9 @@ type CreateFileSystemInput struct { Encrypted *bool `type:"boolean"` // The ID of the AWS KMS CMK to be used to protect the encrypted file system. - // This parameter is only required if you want to use a nondefault CMK. 
If this - // parameter is not specified, the default CMK for Amazon EFS is used. This - // ID can be in one of the following formats: + // This parameter is only required if you want to use a non-default CMK. If + // this parameter is not specified, the default CMK for Amazon EFS is used. + // This ID can be in one of the following formats: // // * Key ID - A unique identifier of the key, for example 1234abcd-12ab-34cd-56ef-1234567890ab. // @@ -3591,13 +3710,15 @@ type CreateFileSystemInput struct { // can scale to higher levels of aggregate throughput and operations per second // with a tradeoff of slightly higher latencies for most file operations. The // performance mode can't be changed after the file system has been created. + // + // The maxIO mode is not supported on file systems using One Zone storage classes. PerformanceMode *string `type:"string" enum:"PerformanceMode"` // The throughput, measured in MiB/s, that you want to provision for a file // system that you're creating. Valid values are 1-1024. Required if ThroughputMode - // is set to provisioned. The upper limit for throughput is 1024 MiB/s. You - // can get this limit increased by contacting AWS Support. For more information, - // see Amazon EFS Limits That You Can Increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits) + // is set to provisioned. The upper limit for throughput is 1024 MiB/s. To increase + // this limit, contact AWS Support. For more information, see Amazon EFS quotas + // that you can increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits) // in the Amazon EFS User Guide. ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` @@ -3606,14 +3727,16 @@ type CreateFileSystemInput struct { // on creation by including a "Key":"Name","Value":"{value}" key-value pair. Tags []*Tag `type:"list"` - // The throughput mode for the file system to be created. 
There are two throughput - // modes to choose from for your file system: bursting and provisioned. If you - // set ThroughputMode to provisioned, you must also set a value for ProvisionedThroughPutInMibps. - // You can decrease your file system's throughput in Provisioned Throughput - // mode or change between the throughput modes as long as it’s been more than - // 24 hours since the last decrease or throughput mode change. For more, see - // Specifying Throughput with Provisioned Mode (https://docs.aws.amazon.com/efs/latest/ug/performance.html#provisioned-throughput) + // Specifies the throughput mode for the file system, either bursting or provisioned. + // If you set ThroughputMode to provisioned, you must also set a value for ProvisionedThroughputInMibps. + // After you create the file system, you can decrease your file system's throughput + // in Provisioned Throughput mode or change between the throughput modes, as + // long as it’s been more than 24 hours since the last decrease or throughput + // mode change. For more information, see Specifying throughput with provisioned + // mode (https://docs.aws.amazon.com/efs/latest/ug/performance.html#provisioned-throughput) // in the Amazon EFS User Guide. + // + // Default is bursting. ThroughputMode *string `type:"string" enum:"ThroughputMode"` } @@ -3630,6 +3753,9 @@ func (s CreateFileSystemInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *CreateFileSystemInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateFileSystemInput"} + if s.AvailabilityZoneName != nil && len(*s.AvailabilityZoneName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AvailabilityZoneName", 1)) + } if s.CreationToken != nil && len(*s.CreationToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("CreationToken", 1)) } @@ -3653,6 +3779,18 @@ func (s *CreateFileSystemInput) Validate() error { return nil } +// SetAvailabilityZoneName sets the AvailabilityZoneName field's value. +func (s *CreateFileSystemInput) SetAvailabilityZoneName(v string) *CreateFileSystemInput { + s.AvailabilityZoneName = &v + return s +} + +// SetBackup sets the Backup field's value. +func (s *CreateFileSystemInput) SetBackup(v bool) *CreateFileSystemInput { + s.Backup = &v + return s +} + // SetCreationToken sets the CreationToken field's value. func (s *CreateFileSystemInput) SetCreationToken(v string) *CreateFileSystemInput { s.CreationToken = &v @@ -3710,7 +3848,9 @@ type CreateMountTargetInput struct { // for the same VPC as subnet specified. SecurityGroups []*string `type:"list"` - // The ID of the subnet to add the mount target in. + // The ID of the subnet to add the mount target in. For file systems that use + // One Zone storage classes, use the subnet that is associated with the file + // system's Availability Zone. // // SubnetId is a required field SubnetId *string `min:"15" type:"string" required:"true"` @@ -3858,6 +3998,11 @@ func (s CreateTagsOutput) GoString() string { // with these settings when a client connects to the access point. When specifying // CreationInfo, you must include values for all properties. // +// Amazon EFS creates a root directory only if you have provided the CreationInfo: +// OwnUid, OwnGID, and permissions for the directory. If you do not provide +// this information, Amazon EFS does not create the root directory. 
If the root +// directory does not exist, attempts to mount using the access point will fail. +// // If you do not provide CreationInfo and the specified RootDirectory does not // exist, attempts to mount the file system using the access point will fail. type CreationInfo struct { @@ -3879,7 +4024,7 @@ type CreationInfo struct { // of an octal number representing the file's mode bits. // // Permissions is a required field - Permissions *string `type:"string" required:"true"` + Permissions *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -3904,6 +4049,9 @@ func (s *CreationInfo) Validate() error { if s.Permissions == nil { invalidParams.Add(request.NewErrParamRequired("Permissions")) } + if s.Permissions != nil && len(*s.Permissions) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Permissions", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4299,7 +4447,7 @@ type DescribeAccessPointsInput struct { // NextToken is present if the response is paginated. You can use NextMarker // in the subsequent request to fetch the next page of access point descriptions. - NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` + NextToken *string `location:"querystring" locationName:"NextToken" min:"1" type:"string"` } // String returns the string representation @@ -4318,6 +4466,9 @@ func (s *DescribeAccessPointsInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4358,7 +4509,7 @@ type DescribeAccessPointsOutput struct { // Present if there are more access points than returned in the response. You // can use the NextMarker in the subsequent request to fetch the additional // descriptions. 
- NextToken *string `type:"string"` + NextToken *string `min:"1" type:"string"` } // String returns the string representation @@ -4496,7 +4647,7 @@ type DescribeFileSystemPolicyOutput struct { FileSystemId *string `type:"string"` // The JSON formatted FileSystemPolicy for the EFS file system. - Policy *string `type:"string"` + Policy *string `min:"1" type:"string"` } // String returns the string representation @@ -5087,6 +5238,18 @@ func (s *FileSystemAlreadyExists) RequestID() string { type FileSystemDescription struct { _ struct{} `type:"structure"` + // The unique and consistent identifier of the Availability Zone in which the + // file system's One Zone storage classes exist. For example, use1-az1 is an + // Availability Zone ID for the us-east-1 AWS Region, and it has the same location + // in every AWS account. + AvailabilityZoneId *string `type:"string"` + + // Describes the AWS Availability Zone in which the file system is located, + // and is valid only for file systems using One Zone storage classes. For more + // information, see Using EFS storage classes (https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) + // in the Amazon EFS User Guide. + AvailabilityZoneName *string `min:"1" type:"string"` + // The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z). // // CreationTime is a required field @@ -5140,12 +5303,8 @@ type FileSystemDescription struct { // PerformanceMode is a required field PerformanceMode *string `type:"string" required:"true" enum:"PerformanceMode"` - // The throughput, measured in MiB/s, that you want to provision for a file - // system. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. - // The limit on throughput is 1024 MiB/s. You can get these limits increased - // by contacting AWS Support. For more information, see Amazon EFS Limits That - // You Can Increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits) - // in the Amazon EFS User Guide. 
+ // The amount of provisioned throughput, measured in MiB/s, for the file system. + // Valid for file systems using ThroughputMode set to provisioned. ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` // The latest known metered size (in bytes) of data stored in the file system, @@ -5166,12 +5325,9 @@ type FileSystemDescription struct { // Tags is a required field Tags []*Tag `type:"list" required:"true"` - // The throughput mode for a file system. There are two throughput modes to - // choose from for your file system: bursting and provisioned. If you set ThroughputMode - // to provisioned, you must also set a value for ProvisionedThroughPutInMibps. - // You can decrease your file system's throughput in Provisioned Throughput - // mode or change between the throughput modes as long as it’s been more than - // 24 hours since the last decrease or throughput mode change. + // Displays the file system's throughput mode. For more information, see Throughput + // modes (https://docs.aws.amazon.com/efs/latest/ug/performance.html#throughput-modes) + // in the Amazon EFS User Guide. ThroughputMode *string `type:"string" enum:"ThroughputMode"` } @@ -5185,6 +5341,18 @@ func (s FileSystemDescription) GoString() string { return s.String() } +// SetAvailabilityZoneId sets the AvailabilityZoneId field's value. +func (s *FileSystemDescription) SetAvailabilityZoneId(v string) *FileSystemDescription { + s.AvailabilityZoneId = &v + return s +} + +// SetAvailabilityZoneName sets the AvailabilityZoneName field's value. +func (s *FileSystemDescription) SetAvailabilityZoneName(v string) *FileSystemDescription { + s.AvailabilityZoneName = &v + return s +} + // SetCreationTime sets the CreationTime field's value. 
func (s *FileSystemDescription) SetCreationTime(v time.Time) *FileSystemDescription { s.CreationTime = &v @@ -5639,7 +5807,7 @@ func (s *IncorrectMountTargetState) RequestID() string { // This value might be returned when you try to create a file system in provisioned // throughput mode, when you attempt to increase the provisioned throughput // of an existing file system, or when you attempt to change an existing file -// system from bursting to provisioned throughput mode. +// system from bursting to provisioned throughput mode. Try again later. type InsufficientThroughputCapacity struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -5913,7 +6081,7 @@ type ListTagsForResourceInput struct { // You can use NextToken in a subsequent request to fetch the next page of access // point descriptions if the response payload was paginated. - NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` + NextToken *string `location:"querystring" locationName:"NextToken" min:"1" type:"string"` // Specifies the EFS resource you want to retrieve tags for. You can retrieve // tags for EFS file systems and access points using this API endpoint. @@ -5938,6 +6106,9 @@ func (s *ListTagsForResourceInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } if s.ResourceId == nil { invalidParams.Add(request.NewErrParamRequired("ResourceId")) } @@ -5974,7 +6145,7 @@ type ListTagsForResourceOutput struct { // NextToken is present if the response payload is paginated. You can use NextToken // in a subsequent request to fetch the next page of access point descriptions. - NextToken *string `type:"string"` + NextToken *string `min:"1" type:"string"` // An array of the tags for the specified EFS resource. 
Tags []*Tag `type:"list"` @@ -6130,16 +6301,16 @@ func (s *MountTargetConflict) RequestID() string { type MountTargetDescription struct { _ struct{} `type:"structure"` - // The unique and consistent identifier of the Availability Zone (AZ) that the - // mount target resides in. For example, use1-az1 is an AZ ID for the us-east-1 - // Region and it has the same location in every AWS account. + // The unique and consistent identifier of the Availability Zone that the mount + // target resides in. For example, use1-az1 is an AZ ID for the us-east-1 Region + // and it has the same location in every AWS account. AvailabilityZoneId *string `type:"string"` - // The name of the Availability Zone (AZ) that the mount target resides in. - // AZs are independently mapped to names for each AWS account. For example, + // The name of the Availability Zone in which the mount target is located. Availability + // Zones are independently mapped to names for each AWS account. For example, // the Availability Zone us-east-1a for your AWS account might not be the same // location as us-east-1a for another AWS account. - AvailabilityZoneName *string `type:"string"` + AvailabilityZoneName *string `min:"1" type:"string"` // The ID of the file system for which the mount target is intended. // @@ -6171,7 +6342,7 @@ type MountTargetDescription struct { // SubnetId is a required field SubnetId *string `min:"15" type:"string" required:"true"` - // The Virtual Private Cloud (VPC) ID that the mount target is configured in. + // The virtual private cloud (VPC) ID that the mount target is configured in. VpcId *string `type:"string"` } @@ -6657,11 +6828,12 @@ type PutFileSystemPolicyInput struct { FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` // The FileSystemPolicy that you're creating. Accepts a JSON formatted policy - // definition. 
To find out more about the elements that make up a file system - // policy, see EFS Resource-based Policies (https://docs.aws.amazon.com/efs/latest/ug/access-control-overview.html#access-control-manage-access-intro-resource-policies). + // definition. EFS file system policies have a 20,000 character limit. To find + // out more about the elements that make up a file system policy, see EFS Resource-based + // Policies (https://docs.aws.amazon.com/efs/latest/ug/access-control-overview.html#access-control-manage-access-intro-resource-policies). // // Policy is a required field - Policy *string `type:"string" required:"true"` + Policy *string `min:"1" type:"string" required:"true"` } // String returns the string representation @@ -6686,6 +6858,9 @@ func (s *PutFileSystemPolicyInput) Validate() error { if s.Policy == nil { invalidParams.Add(request.NewErrParamRequired("Policy")) } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -6718,7 +6893,7 @@ type PutFileSystemPolicyOutput struct { FileSystemId *string `type:"string"` // The JSON formatted FileSystemPolicy for the EFS file system. - Policy *string `type:"string"` + Policy *string `min:"1" type:"string"` } // String returns the string representation @@ -7326,6 +7501,8 @@ func (s *TooManyRequests) RequestID() string { return s.RespMetadata.RequestID } +// Returned if the requested Amazon EFS functionality is not available in the +// specified Availability Zone. 
type UnsupportedAvailabilityZone struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -7392,7 +7569,7 @@ type UntagResourceInput struct { // ResourceId is a required field ResourceId *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"` - // The keys of the key:value tag pairs that you want to remove from the specified + // The keys of the key-value tag pairs that you want to remove from the specified // EFS resource. // // TagKeys is a required field @@ -7465,17 +7642,16 @@ type UpdateFileSystemInput struct { // FileSystemId is a required field FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` - // (Optional) The amount of throughput, in MiB/s, that you want to provision - // for your file system. Valid values are 1-1024. Required if ThroughputMode - // is changed to provisioned on update. If you're not updating the amount of - // provisioned throughput for your file system, you don't need to provide this - // value in your request. + // (Optional) Sets the amount of provisioned throughput, in MiB/s, for the file + // system. Valid values are 1-1024. If you are changing the throughput mode + // to provisioned, you must also provide the amount of provisioned throughput. + // Required if ThroughputMode is changed to provisioned on update. ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` - // (Optional) The throughput mode that you want your file system to use. If - // you're not updating your throughput mode, you don't need to provide this - // value in your request. If you are changing the ThroughputMode to provisioned, - // you must also set a value for ProvisionedThroughputInMibps. + // (Optional) Updates the file system's throughput mode. If you're not updating + // your throughput mode, you don't need to provide this value in your request. 
+ // If you are changing the ThroughputMode to provisioned, you must also set + // a value for ProvisionedThroughputInMibps. ThroughputMode *string `type:"string" enum:"ThroughputMode"` } @@ -7530,6 +7706,18 @@ func (s *UpdateFileSystemInput) SetThroughputMode(v string) *UpdateFileSystemInp type UpdateFileSystemOutput struct { _ struct{} `type:"structure"` + // The unique and consistent identifier of the Availability Zone in which the + // file system's One Zone storage classes exist. For example, use1-az1 is an + // Availability Zone ID for the us-east-1 AWS Region, and it has the same location + // in every AWS account. + AvailabilityZoneId *string `type:"string"` + + // Describes the AWS Availability Zone in which the file system is located, + // and is valid only for file systems using One Zone storage classes. For more + // information, see Using EFS storage classes (https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) + // in the Amazon EFS User Guide. + AvailabilityZoneName *string `min:"1" type:"string"` + // The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z). // // CreationTime is a required field @@ -7583,12 +7771,8 @@ type UpdateFileSystemOutput struct { // PerformanceMode is a required field PerformanceMode *string `type:"string" required:"true" enum:"PerformanceMode"` - // The throughput, measured in MiB/s, that you want to provision for a file - // system. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. - // The limit on throughput is 1024 MiB/s. You can get these limits increased - // by contacting AWS Support. For more information, see Amazon EFS Limits That - // You Can Increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits) - // in the Amazon EFS User Guide. + // The amount of provisioned throughput, measured in MiB/s, for the file system. + // Valid for file systems using ThroughputMode set to provisioned. 
ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` // The latest known metered size (in bytes) of data stored in the file system, @@ -7609,12 +7793,9 @@ type UpdateFileSystemOutput struct { // Tags is a required field Tags []*Tag `type:"list" required:"true"` - // The throughput mode for a file system. There are two throughput modes to - // choose from for your file system: bursting and provisioned. If you set ThroughputMode - // to provisioned, you must also set a value for ProvisionedThroughPutInMibps. - // You can decrease your file system's throughput in Provisioned Throughput - // mode or change between the throughput modes as long as it’s been more than - // 24 hours since the last decrease or throughput mode change. + // Displays the file system's throughput mode. For more information, see Throughput + // modes (https://docs.aws.amazon.com/efs/latest/ug/performance.html#throughput-modes) + // in the Amazon EFS User Guide. ThroughputMode *string `type:"string" enum:"ThroughputMode"` } @@ -7628,6 +7809,18 @@ func (s UpdateFileSystemOutput) GoString() string { return s.String() } +// SetAvailabilityZoneId sets the AvailabilityZoneId field's value. +func (s *UpdateFileSystemOutput) SetAvailabilityZoneId(v string) *UpdateFileSystemOutput { + s.AvailabilityZoneId = &v + return s +} + +// SetAvailabilityZoneName sets the AvailabilityZoneName field's value. +func (s *UpdateFileSystemOutput) SetAvailabilityZoneName(v string) *UpdateFileSystemOutput { + s.AvailabilityZoneName = &v + return s +} + // SetCreationTime sets the CreationTime field's value. func (s *UpdateFileSystemOutput) SetCreationTime(v time.Time) *UpdateFileSystemOutput { s.CreationTime = &v @@ -7718,8 +7911,8 @@ func (s *UpdateFileSystemOutput) SetThroughputMode(v string) *UpdateFileSystemOu return s } -// Returned if the AWS Backup service is not available in the region that the -// request was made. 
+// Returned if the AWS Backup service is not available in the Region in which +// the request was made. type ValidationException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -7793,6 +7986,9 @@ const ( // LifeCycleStateDeleted is a LifeCycleState enum value LifeCycleStateDeleted = "deleted" + + // LifeCycleStateError is a LifeCycleState enum value + LifeCycleStateError = "error" ) // LifeCycleState_Values returns all elements of the LifeCycleState enum @@ -7803,6 +7999,7 @@ func LifeCycleState_Values() []string { LifeCycleStateUpdating, LifeCycleStateDeleting, LifeCycleStateDeleted, + LifeCycleStateError, } } diff --git a/service/efs/errors.go b/service/efs/errors.go index 7e2f36abb24..f832be62cfc 100644 --- a/service/efs/errors.go +++ b/service/efs/errors.go @@ -29,6 +29,15 @@ const ( // AWS account. ErrCodeAccessPointNotFound = "AccessPointNotFound" + // ErrCodeAvailabilityZonesMismatch for service response error code + // "AvailabilityZonesMismatch". + // + // Returned if the Availability Zone that was specified for a mount target is + // different from the Availability Zone that was specified for One Zone storage + // classes. For more information, see Regional and One Zone storage redundancy + // (https://docs.aws.amazon.com/efs/latest/ug/availability-durability.html). + ErrCodeAvailabilityZonesMismatch = "AvailabilityZonesMismatch" + // ErrCodeBadRequest for service response error code // "BadRequest". // @@ -89,7 +98,7 @@ const ( // This value might be returned when you try to create a file system in provisioned // throughput mode, when you attempt to increase the provisioned throughput // of an existing file system, or when you attempt to change an existing file - // system from bursting to provisioned throughput mode. + // system from bursting to provisioned throughput mode. Try again later. 
ErrCodeInsufficientThroughputCapacity = "InsufficientThroughputCapacity" // ErrCodeInternalServerError for service response error code @@ -188,13 +197,16 @@ const ( // ErrCodeUnsupportedAvailabilityZone for service response error code // "UnsupportedAvailabilityZone". + // + // Returned if the requested Amazon EFS functionality is not available in the + // specified Availability Zone. ErrCodeUnsupportedAvailabilityZone = "UnsupportedAvailabilityZone" // ErrCodeValidationException for service response error code // "ValidationException". // - // Returned if the AWS Backup service is not available in the region that the - // request was made. + // Returned if the AWS Backup service is not available in the Region in which + // the request was made. ErrCodeValidationException = "ValidationException" ) @@ -202,6 +214,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "AccessPointAlreadyExists": newErrorAccessPointAlreadyExists, "AccessPointLimitExceeded": newErrorAccessPointLimitExceeded, "AccessPointNotFound": newErrorAccessPointNotFound, + "AvailabilityZonesMismatch": newErrorAvailabilityZonesMismatch, "BadRequest": newErrorBadRequest, "DependencyTimeout": newErrorDependencyTimeout, "FileSystemAlreadyExists": newErrorFileSystemAlreadyExists, diff --git a/service/efs/examples_test.go b/service/efs/examples_test.go index 57cf1e41452..4fb1954d30a 100644 --- a/service/efs/examples_test.go +++ b/service/efs/examples_test.go @@ -27,12 +27,14 @@ func parseTime(layout, value string) *time.Time { // To create a new file system // -// This operation creates a new file system with the default generalpurpose performance -// mode. +// This operation creates a new, encrypted file system with automatic backups enabled, +// and the default generalpurpose performance mode. 
func ExampleEFS_CreateFileSystem_shared00() { svc := efs.New(session.New()) input := &efs.CreateFileSystemInput{ + Backup: aws.Bool(true), CreationToken: aws.String("tokenstring"), + Encrypted: aws.Bool(true), PerformanceMode: aws.String("generalPurpose"), Tags: []*efs.Tag{ { @@ -58,6 +60,8 @@ func ExampleEFS_CreateFileSystem_shared00() { fmt.Println(efs.ErrCodeInsufficientThroughputCapacity, aerr.Error()) case efs.ErrCodeThroughputLimitExceeded: fmt.Println(efs.ErrCodeThroughputLimitExceeded, aerr.Error()) + case efs.ErrCodeUnsupportedAvailabilityZone: + fmt.Println(efs.ErrCodeUnsupportedAvailabilityZone, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -110,6 +114,8 @@ func ExampleEFS_CreateMountTarget_shared00() { fmt.Println(efs.ErrCodeSecurityGroupNotFound, aerr.Error()) case efs.ErrCodeUnsupportedAvailabilityZone: fmt.Println(efs.ErrCodeUnsupportedAvailabilityZone, aerr.Error()) + case efs.ErrCodeAvailabilityZonesMismatch: + fmt.Println(efs.ErrCodeAvailabilityZonesMismatch, aerr.Error()) default: fmt.Println(aerr.Error()) } diff --git a/service/rds/api.go b/service/rds/api.go index d516fc54656..a68d2f7cada 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -2259,7 +2259,7 @@ func (c *RDS) CreateDBProxyRequest(input *CreateDBProxyInput) (req *request.Requ // The requested subnet is invalid, or multiple subnets were requested that // are not all in a common VPC. // -// * ErrCodeDBProxyAlreadyExistsFault "DBProxyTargetExistsFault" +// * ErrCodeDBProxyAlreadyExistsFault "DBProxyAlreadyExistsFault" // The specified proxy name must be unique for all proxies owned by your AWS // account in the specified AWS Region. // @@ -2289,6 +2289,103 @@ func (c *RDS) CreateDBProxyWithContext(ctx aws.Context, input *CreateDBProxyInpu return out, req.Send() } +const opCreateDBProxyEndpoint = "CreateDBProxyEndpoint" + +// CreateDBProxyEndpointRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBProxyEndpoint operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDBProxyEndpoint for more information on using the CreateDBProxyEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDBProxyEndpointRequest method. +// req, resp := client.CreateDBProxyEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateDBProxyEndpoint +func (c *RDS) CreateDBProxyEndpointRequest(input *CreateDBProxyEndpointInput) (req *request.Request, output *CreateDBProxyEndpointOutput) { + op := &request.Operation{ + Name: opCreateDBProxyEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBProxyEndpointInput{} + } + + output = &CreateDBProxyEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDBProxyEndpoint API operation for Amazon Relational Database Service. +// +// Creates a DBProxyEndpoint. Only applies to proxies that are associated with +// Aurora DB clusters. You can use DB proxy endpoints to specify read/write +// or read-only access to the DB cluster. You can also use DB proxy endpoints +// to access a DB proxy through a different VPC than the proxy's default VPC. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation CreateDBProxyEndpoint for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidSubnet "InvalidSubnet" +// The requested subnet is invalid, or multiple subnets were requested that +// are not all in a common VPC. +// +// * ErrCodeDBProxyNotFoundFault "DBProxyNotFoundFault" +// The specified proxy name doesn't correspond to a proxy owned by your AWS +// account in the specified AWS Region. +// +// * ErrCodeDBProxyEndpointAlreadyExistsFault "DBProxyEndpointAlreadyExistsFault" +// The specified DB proxy endpoint name must be unique for all DB proxy endpoints +// owned by your AWS account in the specified AWS Region. +// +// * ErrCodeDBProxyEndpointQuotaExceededFault "DBProxyEndpointQuotaExceededFault" +// The DB proxy already has the maximum number of endpoints. +// +// * ErrCodeInvalidDBProxyStateFault "InvalidDBProxyStateFault" +// The requested operation can't be performed while the proxy is in this state. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateDBProxyEndpoint +func (c *RDS) CreateDBProxyEndpoint(input *CreateDBProxyEndpointInput) (*CreateDBProxyEndpointOutput, error) { + req, out := c.CreateDBProxyEndpointRequest(input) + return out, req.Send() +} + +// CreateDBProxyEndpointWithContext is the same as CreateDBProxyEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDBProxyEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *RDS) CreateDBProxyEndpointWithContext(ctx aws.Context, input *CreateDBProxyEndpointInput, opts ...request.Option) (*CreateDBProxyEndpointOutput, error) { + req, out := c.CreateDBProxyEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateDBSecurityGroup = "CreateDBSecurityGroup" // CreateDBSecurityGroupRequest generates a "aws/request.Request" representing the @@ -3656,7 +3753,7 @@ func (c *RDS) DeleteDBProxyRequest(input *DeleteDBProxyInput) (req *request.Requ // DeleteDBProxy API operation for Amazon Relational Database Service. // -// Deletes an existing proxy. +// Deletes an existing DB proxy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3695,6 +3792,92 @@ func (c *RDS) DeleteDBProxyWithContext(ctx aws.Context, input *DeleteDBProxyInpu return out, req.Send() } +const opDeleteDBProxyEndpoint = "DeleteDBProxyEndpoint" + +// DeleteDBProxyEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBProxyEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDBProxyEndpoint for more information on using the DeleteDBProxyEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDBProxyEndpointRequest method. 
+// req, resp := client.DeleteDBProxyEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteDBProxyEndpoint +func (c *RDS) DeleteDBProxyEndpointRequest(input *DeleteDBProxyEndpointInput) (req *request.Request, output *DeleteDBProxyEndpointOutput) { + op := &request.Operation{ + Name: opDeleteDBProxyEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBProxyEndpointInput{} + } + + output = &DeleteDBProxyEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteDBProxyEndpoint API operation for Amazon Relational Database Service. +// +// Deletes a DBProxyEndpoint. Doing so removes the ability to access the DB +// proxy using the endpoint that you defined. The endpoint that you delete might +// have provided capabilities such as read/write or read-only operations, or +// using a different VPC than the DB proxy's default VPC. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DeleteDBProxyEndpoint for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDBProxyEndpointNotFoundFault "DBProxyEndpointNotFoundFault" +// The DB proxy endpoint doesn't exist. +// +// * ErrCodeInvalidDBProxyEndpointStateFault "InvalidDBProxyEndpointStateFault" +// You can't perform this operation while the DB proxy endpoint is in a particular +// state. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteDBProxyEndpoint +func (c *RDS) DeleteDBProxyEndpoint(input *DeleteDBProxyEndpointInput) (*DeleteDBProxyEndpointOutput, error) { + req, out := c.DeleteDBProxyEndpointRequest(input) + return out, req.Send() +} + +// DeleteDBProxyEndpointWithContext is the same as DeleteDBProxyEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDBProxyEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DeleteDBProxyEndpointWithContext(ctx aws.Context, input *DeleteDBProxyEndpointInput, opts ...request.Option) (*DeleteDBProxyEndpointOutput, error) { + req, out := c.DeleteDBProxyEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteDBSecurityGroup = "DeleteDBSecurityGroup" // DeleteDBSecurityGroupRequest generates a "aws/request.Request" representing the @@ -6655,6 +6838,147 @@ func (c *RDS) DescribeDBProxiesPagesWithContext(ctx aws.Context, input *Describe return p.Err() } +const opDescribeDBProxyEndpoints = "DescribeDBProxyEndpoints" + +// DescribeDBProxyEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBProxyEndpoints operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDBProxyEndpoints for more information on using the DescribeDBProxyEndpoints +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDBProxyEndpointsRequest method. +// req, resp := client.DescribeDBProxyEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeDBProxyEndpoints +func (c *RDS) DescribeDBProxyEndpointsRequest(input *DescribeDBProxyEndpointsInput) (req *request.Request, output *DescribeDBProxyEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeDBProxyEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBProxyEndpointsInput{} + } + + output = &DescribeDBProxyEndpointsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDBProxyEndpoints API operation for Amazon Relational Database Service. +// +// Returns information about DB proxy endpoints. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DescribeDBProxyEndpoints for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDBProxyNotFoundFault "DBProxyNotFoundFault" +// The specified proxy name doesn't correspond to a proxy owned by your AWS +// account in the specified AWS Region. +// +// * ErrCodeDBProxyEndpointNotFoundFault "DBProxyEndpointNotFoundFault" +// The DB proxy endpoint doesn't exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeDBProxyEndpoints +func (c *RDS) DescribeDBProxyEndpoints(input *DescribeDBProxyEndpointsInput) (*DescribeDBProxyEndpointsOutput, error) { + req, out := c.DescribeDBProxyEndpointsRequest(input) + return out, req.Send() +} + +// DescribeDBProxyEndpointsWithContext is the same as DescribeDBProxyEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDBProxyEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeDBProxyEndpointsWithContext(ctx aws.Context, input *DescribeDBProxyEndpointsInput, opts ...request.Option) (*DescribeDBProxyEndpointsOutput, error) { + req, out := c.DescribeDBProxyEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeDBProxyEndpointsPages iterates over the pages of a DescribeDBProxyEndpoints operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBProxyEndpoints method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBProxyEndpoints operation. 
+// pageNum := 0 +// err := client.DescribeDBProxyEndpointsPages(params, +// func(page *rds.DescribeDBProxyEndpointsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBProxyEndpointsPages(input *DescribeDBProxyEndpointsInput, fn func(*DescribeDBProxyEndpointsOutput, bool) bool) error { + return c.DescribeDBProxyEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDBProxyEndpointsPagesWithContext same as DescribeDBProxyEndpointsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeDBProxyEndpointsPagesWithContext(ctx aws.Context, input *DescribeDBProxyEndpointsInput, fn func(*DescribeDBProxyEndpointsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDBProxyEndpointsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBProxyEndpointsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDBProxyEndpointsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBProxyTargetGroups = "DescribeDBProxyTargetGroups" // DescribeDBProxyTargetGroupsRequest generates a "aws/request.Request" representing the @@ -10950,7 +11274,7 @@ func (c *RDS) ModifyDBProxyRequest(input *ModifyDBProxyInput) (req *request.Requ // The specified proxy name doesn't correspond to a proxy owned by your AWS // account in the specified AWS Region. 
// -// * ErrCodeDBProxyAlreadyExistsFault "DBProxyTargetExistsFault" +// * ErrCodeDBProxyAlreadyExistsFault "DBProxyAlreadyExistsFault" // The specified proxy name must be unique for all proxies owned by your AWS // account in the specified AWS Region. // @@ -10979,6 +11303,96 @@ func (c *RDS) ModifyDBProxyWithContext(ctx aws.Context, input *ModifyDBProxyInpu return out, req.Send() } +const opModifyDBProxyEndpoint = "ModifyDBProxyEndpoint" + +// ModifyDBProxyEndpointRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBProxyEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyDBProxyEndpoint for more information on using the ModifyDBProxyEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyDBProxyEndpointRequest method. +// req, resp := client.ModifyDBProxyEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ModifyDBProxyEndpoint +func (c *RDS) ModifyDBProxyEndpointRequest(input *ModifyDBProxyEndpointInput) (req *request.Request, output *ModifyDBProxyEndpointOutput) { + op := &request.Operation{ + Name: opModifyDBProxyEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBProxyEndpointInput{} + } + + output = &ModifyDBProxyEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyDBProxyEndpoint API operation for Amazon Relational Database Service. 
+// +// Changes the settings for an existing DB proxy endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation ModifyDBProxyEndpoint for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDBProxyEndpointNotFoundFault "DBProxyEndpointNotFoundFault" +// The DB proxy endpoint doesn't exist. +// +// * ErrCodeDBProxyEndpointAlreadyExistsFault "DBProxyEndpointAlreadyExistsFault" +// The specified DB proxy endpoint name must be unique for all DB proxy endpoints +// owned by your AWS account in the specified AWS Region. +// +// * ErrCodeInvalidDBProxyEndpointStateFault "InvalidDBProxyEndpointStateFault" +// You can't perform this operation while the DB proxy endpoint is in a particular +// state. +// +// * ErrCodeInvalidDBProxyStateFault "InvalidDBProxyStateFault" +// The requested operation can't be performed while the proxy is in this state. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ModifyDBProxyEndpoint +func (c *RDS) ModifyDBProxyEndpoint(input *ModifyDBProxyEndpointInput) (*ModifyDBProxyEndpointOutput, error) { + req, out := c.ModifyDBProxyEndpointRequest(input) + return out, req.Send() +} + +// ModifyDBProxyEndpointWithContext is the same as ModifyDBProxyEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyDBProxyEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *RDS) ModifyDBProxyEndpointWithContext(ctx aws.Context, input *ModifyDBProxyEndpointInput, opts ...request.Option) (*ModifyDBProxyEndpointOutput, error) { + req, out := c.ModifyDBProxyEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyDBProxyTargetGroup = "ModifyDBProxyTargetGroup" // ModifyDBProxyTargetGroupRequest generates a "aws/request.Request" representing the @@ -20022,6 +20436,135 @@ func (s *CreateDBParameterGroupOutput) SetDBParameterGroup(v *DBParameterGroup) return s } +type CreateDBProxyEndpointInput struct { + _ struct{} `type:"structure"` + + // The name of the DB proxy endpoint to create. + // + // DBProxyEndpointName is a required field + DBProxyEndpointName *string `min:"1" type:"string" required:"true"` + + // The name of the DB proxy associated with the DB proxy endpoint that you create. + // + // DBProxyName is a required field + DBProxyName *string `min:"1" type:"string" required:"true"` + + // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) + // in the Amazon RDS User Guide. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A value that indicates whether the DB proxy endpoint can be used for read/write + // or read-only operations. The default is READ_WRITE. + TargetRole *string `type:"string" enum:"DBProxyEndpointTargetRole"` + + // The VPC security group IDs for the DB proxy endpoint that you create. You + // can specify a different set of security group IDs than for the original DB + // proxy. The default is the default security group for the VPC. + VpcSecurityGroupIds []*string `type:"list"` + + // The VPC subnet IDs for the DB proxy endpoint that you create. You can specify + // a different set of subnet IDs than for the original DB proxy. 
+ // + // VpcSubnetIds is a required field + VpcSubnetIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateDBProxyEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBProxyEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDBProxyEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDBProxyEndpointInput"} + if s.DBProxyEndpointName == nil { + invalidParams.Add(request.NewErrParamRequired("DBProxyEndpointName")) + } + if s.DBProxyEndpointName != nil && len(*s.DBProxyEndpointName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DBProxyEndpointName", 1)) + } + if s.DBProxyName == nil { + invalidParams.Add(request.NewErrParamRequired("DBProxyName")) + } + if s.DBProxyName != nil && len(*s.DBProxyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DBProxyName", 1)) + } + if s.VpcSubnetIds == nil { + invalidParams.Add(request.NewErrParamRequired("VpcSubnetIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDBProxyEndpointName sets the DBProxyEndpointName field's value. +func (s *CreateDBProxyEndpointInput) SetDBProxyEndpointName(v string) *CreateDBProxyEndpointInput { + s.DBProxyEndpointName = &v + return s +} + +// SetDBProxyName sets the DBProxyName field's value. +func (s *CreateDBProxyEndpointInput) SetDBProxyName(v string) *CreateDBProxyEndpointInput { + s.DBProxyName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDBProxyEndpointInput) SetTags(v []*Tag) *CreateDBProxyEndpointInput { + s.Tags = v + return s +} + +// SetTargetRole sets the TargetRole field's value. 
+func (s *CreateDBProxyEndpointInput) SetTargetRole(v string) *CreateDBProxyEndpointInput { + s.TargetRole = &v + return s +} + +// SetVpcSecurityGroupIds sets the VpcSecurityGroupIds field's value. +func (s *CreateDBProxyEndpointInput) SetVpcSecurityGroupIds(v []*string) *CreateDBProxyEndpointInput { + s.VpcSecurityGroupIds = v + return s +} + +// SetVpcSubnetIds sets the VpcSubnetIds field's value. +func (s *CreateDBProxyEndpointInput) SetVpcSubnetIds(v []*string) *CreateDBProxyEndpointInput { + s.VpcSubnetIds = v + return s +} + +type CreateDBProxyEndpointOutput struct { + _ struct{} `type:"structure"` + + // The DBProxyEndpoint object that is created by the API operation. The DB proxy + // endpoint that you create might provide capabilities such as read/write or + // read-only operations, or using a different VPC than the proxy's default VPC. + DBProxyEndpoint *DBProxyEndpoint `type:"structure"` +} + +// String returns the string representation +func (s CreateDBProxyEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBProxyEndpointOutput) GoString() string { + return s.String() +} + +// SetDBProxyEndpoint sets the DBProxyEndpoint field's value. +func (s *CreateDBProxyEndpointOutput) SetDBProxyEndpoint(v *DBProxyEndpoint) *CreateDBProxyEndpointOutput { + s.DBProxyEndpoint = v + return s +} + type CreateDBProxyInput struct { _ struct{} `type:"structure"` @@ -23722,8 +24265,8 @@ type DBProxy struct { // in the logs. DebugLogging *bool `type:"boolean"` - // The endpoint that you can use to connect to the proxy. You include the endpoint - // value in the connection string for a database client application. + // The endpoint that you can use to connect to the DB proxy. You include the + // endpoint value in the connection string for a database client application. Endpoint *string `type:"string"` // The engine family applies to MySQL and PostgreSQL for both RDS and Aurora. 
@@ -23755,6 +24298,9 @@ type DBProxy struct { // The date and time when the proxy was last updated. UpdatedDate *time.Time `type:"timestamp"` + // Provides the VPC ID of the DB proxy. + VpcId *string `type:"string"` + // Provides a list of VPC security groups that the proxy belongs to. VpcSecurityGroupIds []*string `type:"list"` @@ -23844,6 +24390,12 @@ func (s *DBProxy) SetUpdatedDate(v time.Time) *DBProxy { return s } +// SetVpcId sets the VpcId field's value. +func (s *DBProxy) SetVpcId(v string) *DBProxy { + s.VpcId = &v + return s +} + // SetVpcSecurityGroupIds sets the VpcSecurityGroupIds field's value. func (s *DBProxy) SetVpcSecurityGroupIds(v []*string) *DBProxy { s.VpcSecurityGroupIds = v @@ -23856,6 +24408,138 @@ func (s *DBProxy) SetVpcSubnetIds(v []*string) *DBProxy { return s } +// The data structure representing an endpoint associated with a DB proxy. RDS +// automatically creates one endpoint for each DB proxy. For Aurora DB clusters, +// you can associate additional endpoints with the same DB proxy. These endpoints +// can be read/write or read-only. They can also reside in different VPCs than +// the associated DB proxy. +// +// This data type is used as a response element in the DescribeDBProxyEndpoints +// operation. +type DBProxyEndpoint struct { + _ struct{} `type:"structure"` + + // The date and time when the DB proxy endpoint was first created. + CreatedDate *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) for the DB proxy endpoint. + DBProxyEndpointArn *string `type:"string"` + + // The name for the DB proxy endpoint. An identifier must begin with a letter + // and must contain only ASCII letters, digits, and hyphens; it can't end with + // a hyphen or contain two consecutive hyphens. + DBProxyEndpointName *string `type:"string"` + + // The identifier for the DB proxy that is associated with this DB proxy endpoint. + DBProxyName *string `type:"string"` + + // The endpoint that you can use to connect to the DB proxy. 
You include the + // endpoint value in the connection string for a database client application. + Endpoint *string `type:"string"` + + // A value that indicates whether this endpoint is the default endpoint for + // the associated DB proxy. Default DB proxy endpoints always have read/write + // capability. Other endpoints that you associate with the DB proxy can be either + // read/write or read-only. + IsDefault *bool `type:"boolean"` + + // The current status of this DB proxy endpoint. A status of available means + // the endpoint is ready to handle requests. Other values indicate that you + // must wait for the endpoint to be ready, or take some action to resolve an + // issue. + Status *string `type:"string" enum:"DBProxyEndpointStatus"` + + // A value that indicates whether the DB proxy endpoint can be used for read/write + // or read-only operations. + TargetRole *string `type:"string" enum:"DBProxyEndpointTargetRole"` + + // Provides the VPC ID of the DB proxy endpoint. + VpcId *string `type:"string"` + + // Provides a list of VPC security groups that the DB proxy endpoint belongs + // to. + VpcSecurityGroupIds []*string `type:"list"` + + // The EC2 subnet IDs for the DB proxy endpoint. + VpcSubnetIds []*string `type:"list"` +} + +// String returns the string representation +func (s DBProxyEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBProxyEndpoint) GoString() string { + return s.String() +} + +// SetCreatedDate sets the CreatedDate field's value. +func (s *DBProxyEndpoint) SetCreatedDate(v time.Time) *DBProxyEndpoint { + s.CreatedDate = &v + return s +} + +// SetDBProxyEndpointArn sets the DBProxyEndpointArn field's value. +func (s *DBProxyEndpoint) SetDBProxyEndpointArn(v string) *DBProxyEndpoint { + s.DBProxyEndpointArn = &v + return s +} + +// SetDBProxyEndpointName sets the DBProxyEndpointName field's value. 
+func (s *DBProxyEndpoint) SetDBProxyEndpointName(v string) *DBProxyEndpoint { + s.DBProxyEndpointName = &v + return s +} + +// SetDBProxyName sets the DBProxyName field's value. +func (s *DBProxyEndpoint) SetDBProxyName(v string) *DBProxyEndpoint { + s.DBProxyName = &v + return s +} + +// SetEndpoint sets the Endpoint field's value. +func (s *DBProxyEndpoint) SetEndpoint(v string) *DBProxyEndpoint { + s.Endpoint = &v + return s +} + +// SetIsDefault sets the IsDefault field's value. +func (s *DBProxyEndpoint) SetIsDefault(v bool) *DBProxyEndpoint { + s.IsDefault = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DBProxyEndpoint) SetStatus(v string) *DBProxyEndpoint { + s.Status = &v + return s +} + +// SetTargetRole sets the TargetRole field's value. +func (s *DBProxyEndpoint) SetTargetRole(v string) *DBProxyEndpoint { + s.TargetRole = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DBProxyEndpoint) SetVpcId(v string) *DBProxyEndpoint { + s.VpcId = &v + return s +} + +// SetVpcSecurityGroupIds sets the VpcSecurityGroupIds field's value. +func (s *DBProxyEndpoint) SetVpcSecurityGroupIds(v []*string) *DBProxyEndpoint { + s.VpcSecurityGroupIds = v + return s +} + +// SetVpcSubnetIds sets the VpcSubnetIds field's value. +func (s *DBProxyEndpoint) SetVpcSubnetIds(v []*string) *DBProxyEndpoint { + s.VpcSubnetIds = v + return s +} + // Contains the details for an RDS Proxy target. It represents an RDS DB instance // or Aurora DB cluster that the proxy can connect to. One or more targets are // associated with an RDS Proxy target group. @@ -23876,6 +24560,10 @@ type DBProxyTarget struct { // for an RDS DB instance, or the cluster identifier for an Aurora DB cluster. RdsResourceId *string `type:"string"` + // A value that indicates whether the target of the proxy can be used for read/write + // or read-only operations. 
+ Role *string `type:"string" enum:"TargetRole"` + // The Amazon Resource Name (ARN) for the RDS DB instance or Aurora DB cluster. TargetArn *string `type:"string"` @@ -23919,6 +24607,12 @@ func (s *DBProxyTarget) SetRdsResourceId(v string) *DBProxyTarget { return s } +// SetRole sets the Role field's value. +func (s *DBProxyTarget) SetRole(v string) *DBProxyTarget { + s.Role = &v + return s +} + // SetTargetArn sets the TargetArn field's value. func (s *DBProxyTarget) SetTargetArn(v string) *DBProxyTarget { s.TargetArn = &v @@ -25318,6 +26012,71 @@ func (s DeleteDBParameterGroupOutput) GoString() string { return s.String() } +type DeleteDBProxyEndpointInput struct { + _ struct{} `type:"structure"` + + // The name of the DB proxy endpoint to delete. + // + // DBProxyEndpointName is a required field + DBProxyEndpointName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBProxyEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBProxyEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDBProxyEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDBProxyEndpointInput"} + if s.DBProxyEndpointName == nil { + invalidParams.Add(request.NewErrParamRequired("DBProxyEndpointName")) + } + if s.DBProxyEndpointName != nil && len(*s.DBProxyEndpointName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DBProxyEndpointName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDBProxyEndpointName sets the DBProxyEndpointName field's value. 
+func (s *DeleteDBProxyEndpointInput) SetDBProxyEndpointName(v string) *DeleteDBProxyEndpointInput { + s.DBProxyEndpointName = &v + return s +} + +type DeleteDBProxyEndpointOutput struct { + _ struct{} `type:"structure"` + + // The data structure representing the details of the DB proxy endpoint that + // you delete. + DBProxyEndpoint *DBProxyEndpoint `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBProxyEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBProxyEndpointOutput) GoString() string { + return s.String() +} + +// SetDBProxyEndpoint sets the DBProxyEndpoint field's value. +func (s *DeleteDBProxyEndpointOutput) SetDBProxyEndpoint(v *DBProxyEndpoint) *DeleteDBProxyEndpointOutput { + s.DBProxyEndpoint = v + return s +} + type DeleteDBProxyInput struct { _ struct{} `type:"structure"` @@ -28122,7 +28881,8 @@ func (s *DescribeDBParametersOutput) SetParameters(v []*Parameter) *DescribeDBPa type DescribeDBProxiesInput struct { _ struct{} `type:"structure"` - // The name of the DB proxy. + // The name of the DB proxy. If you omit this parameter, the output includes + // information about all DB proxies owned by your AWS account ID. DBProxyName *string `type:"string"` // This parameter is not currently supported. @@ -28234,6 +28994,140 @@ func (s *DescribeDBProxiesOutput) SetMarker(v string) *DescribeDBProxiesOutput { return s } +type DescribeDBProxyEndpointsInput struct { + _ struct{} `type:"structure"` + + // The name of a DB proxy endpoint to describe. If you omit this parameter, + // the output includes information about all DB proxy endpoints associated with + // the specified proxy. + DBProxyEndpointName *string `min:"1" type:"string"` + + // The name of the DB proxy whose endpoints you want to describe. If you omit + // this parameter, the output includes information about all DB proxy endpoints + // associated with all your DB proxies. 
+ DBProxyName *string `min:"1" type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `min:"20" type:"integer"` +} + +// String returns the string representation +func (s DescribeDBProxyEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBProxyEndpointsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeDBProxyEndpointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBProxyEndpointsInput"} + if s.DBProxyEndpointName != nil && len(*s.DBProxyEndpointName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DBProxyEndpointName", 1)) + } + if s.DBProxyName != nil && len(*s.DBProxyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DBProxyName", 1)) + } + if s.MaxRecords != nil && *s.MaxRecords < 20 { + invalidParams.Add(request.NewErrParamMinValue("MaxRecords", 20)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDBProxyEndpointName sets the DBProxyEndpointName field's value. +func (s *DescribeDBProxyEndpointsInput) SetDBProxyEndpointName(v string) *DescribeDBProxyEndpointsInput { + s.DBProxyEndpointName = &v + return s +} + +// SetDBProxyName sets the DBProxyName field's value. +func (s *DescribeDBProxyEndpointsInput) SetDBProxyName(v string) *DescribeDBProxyEndpointsInput { + s.DBProxyName = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeDBProxyEndpointsInput) SetFilters(v []*Filter) *DescribeDBProxyEndpointsInput { + s.Filters = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeDBProxyEndpointsInput) SetMarker(v string) *DescribeDBProxyEndpointsInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeDBProxyEndpointsInput) SetMaxRecords(v int64) *DescribeDBProxyEndpointsInput { + s.MaxRecords = &v + return s +} + +type DescribeDBProxyEndpointsOutput struct { + _ struct{} `type:"structure"` + + // The list of ProxyEndpoint objects returned by the API operation. 
+	DBProxyEndpoints []*DBProxyEndpoint `type:"list"`
+
+	// An optional pagination token provided by a previous request. If this parameter
+	// is specified, the response includes only records beyond the marker, up to
+	// the value specified by MaxRecords.
+	Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDBProxyEndpointsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBProxyEndpointsOutput) GoString() string {
+	return s.String()
+}
+
+// SetDBProxyEndpoints sets the DBProxyEndpoints field's value.
+func (s *DescribeDBProxyEndpointsOutput) SetDBProxyEndpoints(v []*DBProxyEndpoint) *DescribeDBProxyEndpointsOutput {
+	s.DBProxyEndpoints = v
+	return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *DescribeDBProxyEndpointsOutput) SetMarker(v string) *DescribeDBProxyEndpointsOutput {
+	s.Marker = &v
+	return s
+}
+
 type DescribeDBProxyTargetGroupsInput struct {
 	_ struct{} `type:"structure"`
 
@@ -34882,6 +35776,97 @@ func (s *ModifyDBParameterGroupInput) SetParameters(v []*Parameter) *ModifyDBPar
 	return s
 }
 
+type ModifyDBProxyEndpointInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the DB proxy associated with the DB proxy endpoint that you want
+	// to modify.
+	//
+	// DBProxyEndpointName is a required field
+	DBProxyEndpointName *string `min:"1" type:"string" required:"true"`
+
+	// The new identifier for the DBProxyEndpoint. An identifier must begin with
+	// a letter and must contain only ASCII letters, digits, and hyphens; it can't
+	// end with a hyphen or contain two consecutive hyphens.
+	NewDBProxyEndpointName *string `min:"1" type:"string"`
+
+	// The VPC security group IDs for the DB proxy endpoint. When the DB proxy endpoint
+	// uses a different VPC than the original proxy, you also specify a different
+	// set of security group IDs than for the original proxy.
+ VpcSecurityGroupIds []*string `type:"list"` +} + +// String returns the string representation +func (s ModifyDBProxyEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBProxyEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyDBProxyEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyDBProxyEndpointInput"} + if s.DBProxyEndpointName == nil { + invalidParams.Add(request.NewErrParamRequired("DBProxyEndpointName")) + } + if s.DBProxyEndpointName != nil && len(*s.DBProxyEndpointName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DBProxyEndpointName", 1)) + } + if s.NewDBProxyEndpointName != nil && len(*s.NewDBProxyEndpointName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewDBProxyEndpointName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDBProxyEndpointName sets the DBProxyEndpointName field's value. +func (s *ModifyDBProxyEndpointInput) SetDBProxyEndpointName(v string) *ModifyDBProxyEndpointInput { + s.DBProxyEndpointName = &v + return s +} + +// SetNewDBProxyEndpointName sets the NewDBProxyEndpointName field's value. +func (s *ModifyDBProxyEndpointInput) SetNewDBProxyEndpointName(v string) *ModifyDBProxyEndpointInput { + s.NewDBProxyEndpointName = &v + return s +} + +// SetVpcSecurityGroupIds sets the VpcSecurityGroupIds field's value. +func (s *ModifyDBProxyEndpointInput) SetVpcSecurityGroupIds(v []*string) *ModifyDBProxyEndpointInput { + s.VpcSecurityGroupIds = v + return s +} + +type ModifyDBProxyEndpointOutput struct { + _ struct{} `type:"structure"` + + // The DBProxyEndpoint object representing the new settings for the DB proxy + // endpoint. 
+ DBProxyEndpoint *DBProxyEndpoint `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBProxyEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBProxyEndpointOutput) GoString() string { + return s.String() +} + +// SetDBProxyEndpoint sets the DBProxyEndpoint field's value. +func (s *ModifyDBProxyEndpointOutput) SetDBProxyEndpoint(v *DBProxyEndpoint) *ModifyDBProxyEndpointOutput { + s.DBProxyEndpoint = v + return s +} + type ModifyDBProxyInput struct { _ struct{} `type:"structure"` @@ -43551,6 +44536,54 @@ func AuthScheme_Values() []string { } } +const ( + // DBProxyEndpointStatusAvailable is a DBProxyEndpointStatus enum value + DBProxyEndpointStatusAvailable = "available" + + // DBProxyEndpointStatusModifying is a DBProxyEndpointStatus enum value + DBProxyEndpointStatusModifying = "modifying" + + // DBProxyEndpointStatusIncompatibleNetwork is a DBProxyEndpointStatus enum value + DBProxyEndpointStatusIncompatibleNetwork = "incompatible-network" + + // DBProxyEndpointStatusInsufficientResourceLimits is a DBProxyEndpointStatus enum value + DBProxyEndpointStatusInsufficientResourceLimits = "insufficient-resource-limits" + + // DBProxyEndpointStatusCreating is a DBProxyEndpointStatus enum value + DBProxyEndpointStatusCreating = "creating" + + // DBProxyEndpointStatusDeleting is a DBProxyEndpointStatus enum value + DBProxyEndpointStatusDeleting = "deleting" +) + +// DBProxyEndpointStatus_Values returns all elements of the DBProxyEndpointStatus enum +func DBProxyEndpointStatus_Values() []string { + return []string{ + DBProxyEndpointStatusAvailable, + DBProxyEndpointStatusModifying, + DBProxyEndpointStatusIncompatibleNetwork, + DBProxyEndpointStatusInsufficientResourceLimits, + DBProxyEndpointStatusCreating, + DBProxyEndpointStatusDeleting, + } +} + +const ( + // DBProxyEndpointTargetRoleReadWrite is a DBProxyEndpointTargetRole enum value + 
DBProxyEndpointTargetRoleReadWrite = "READ_WRITE" + + // DBProxyEndpointTargetRoleReadOnly is a DBProxyEndpointTargetRole enum value + DBProxyEndpointTargetRoleReadOnly = "READ_ONLY" +) + +// DBProxyEndpointTargetRole_Values returns all elements of the DBProxyEndpointTargetRole enum +func DBProxyEndpointTargetRole_Values() []string { + return []string{ + DBProxyEndpointTargetRoleReadWrite, + DBProxyEndpointTargetRoleReadOnly, + } +} + const ( // DBProxyStatusAvailable is a DBProxyStatus enum value DBProxyStatusAvailable = "available" @@ -43707,6 +44740,9 @@ const ( // TargetHealthReasonPendingProxyCapacity is a TargetHealthReason enum value TargetHealthReasonPendingProxyCapacity = "PENDING_PROXY_CAPACITY" + + // TargetHealthReasonInvalidReplicationState is a TargetHealthReason enum value + TargetHealthReasonInvalidReplicationState = "INVALID_REPLICATION_STATE" ) // TargetHealthReason_Values returns all elements of the TargetHealthReason enum @@ -43716,6 +44752,27 @@ func TargetHealthReason_Values() []string { TargetHealthReasonConnectionFailed, TargetHealthReasonAuthFailure, TargetHealthReasonPendingProxyCapacity, + TargetHealthReasonInvalidReplicationState, + } +} + +const ( + // TargetRoleReadWrite is a TargetRole enum value + TargetRoleReadWrite = "READ_WRITE" + + // TargetRoleReadOnly is a TargetRole enum value + TargetRoleReadOnly = "READ_ONLY" + + // TargetRoleUnknown is a TargetRole enum value + TargetRoleUnknown = "UNKNOWN" +) + +// TargetRole_Values returns all elements of the TargetRole enum +func TargetRole_Values() []string { + return []string{ + TargetRoleReadWrite, + TargetRoleReadOnly, + TargetRoleUnknown, } } diff --git a/service/rds/errors.go b/service/rds/errors.go index 06a6b13b239..c2d315c3677 100644 --- a/service/rds/errors.go +++ b/service/rds/errors.go @@ -213,11 +213,30 @@ const ( ErrCodeDBParameterGroupQuotaExceededFault = "DBParameterGroupQuotaExceeded" // ErrCodeDBProxyAlreadyExistsFault for service response error code - // 
"DBProxyTargetExistsFault". + // "DBProxyAlreadyExistsFault". // // The specified proxy name must be unique for all proxies owned by your AWS // account in the specified AWS Region. - ErrCodeDBProxyAlreadyExistsFault = "DBProxyTargetExistsFault" + ErrCodeDBProxyAlreadyExistsFault = "DBProxyAlreadyExistsFault" + + // ErrCodeDBProxyEndpointAlreadyExistsFault for service response error code + // "DBProxyEndpointAlreadyExistsFault". + // + // The specified DB proxy endpoint name must be unique for all DB proxy endpoints + // owned by your AWS account in the specified AWS Region. + ErrCodeDBProxyEndpointAlreadyExistsFault = "DBProxyEndpointAlreadyExistsFault" + + // ErrCodeDBProxyEndpointNotFoundFault for service response error code + // "DBProxyEndpointNotFoundFault". + // + // The DB proxy endpoint doesn't exist. + ErrCodeDBProxyEndpointNotFoundFault = "DBProxyEndpointNotFoundFault" + + // ErrCodeDBProxyEndpointQuotaExceededFault for service response error code + // "DBProxyEndpointQuotaExceededFault". + // + // The DB proxy already has the maximum number of endpoints. + ErrCodeDBProxyEndpointQuotaExceededFault = "DBProxyEndpointQuotaExceededFault" // ErrCodeDBProxyNotFoundFault for service response error code // "DBProxyNotFoundFault". @@ -489,6 +508,13 @@ const ( // is in this state. ErrCodeInvalidDBParameterGroupStateFault = "InvalidDBParameterGroupState" + // ErrCodeInvalidDBProxyEndpointStateFault for service response error code + // "InvalidDBProxyEndpointStateFault". + // + // You can't perform this operation while the DB proxy endpoint is in a particular + // state. + ErrCodeInvalidDBProxyEndpointStateFault = "InvalidDBProxyEndpointStateFault" + // ErrCodeInvalidDBProxyStateFault for service response error code // "InvalidDBProxyStateFault". 
// diff --git a/service/rds/rdsiface/interface.go b/service/rds/rdsiface/interface.go index a9cc0eec97d..4e447378ede 100644 --- a/service/rds/rdsiface/interface.go +++ b/service/rds/rdsiface/interface.go @@ -148,6 +148,10 @@ type RDSAPI interface { CreateDBProxyWithContext(aws.Context, *rds.CreateDBProxyInput, ...request.Option) (*rds.CreateDBProxyOutput, error) CreateDBProxyRequest(*rds.CreateDBProxyInput) (*request.Request, *rds.CreateDBProxyOutput) + CreateDBProxyEndpoint(*rds.CreateDBProxyEndpointInput) (*rds.CreateDBProxyEndpointOutput, error) + CreateDBProxyEndpointWithContext(aws.Context, *rds.CreateDBProxyEndpointInput, ...request.Option) (*rds.CreateDBProxyEndpointOutput, error) + CreateDBProxyEndpointRequest(*rds.CreateDBProxyEndpointInput) (*request.Request, *rds.CreateDBProxyEndpointOutput) + CreateDBSecurityGroup(*rds.CreateDBSecurityGroupInput) (*rds.CreateDBSecurityGroupOutput, error) CreateDBSecurityGroupWithContext(aws.Context, *rds.CreateDBSecurityGroupInput, ...request.Option) (*rds.CreateDBSecurityGroupOutput, error) CreateDBSecurityGroupRequest(*rds.CreateDBSecurityGroupInput) (*request.Request, *rds.CreateDBSecurityGroupOutput) @@ -208,6 +212,10 @@ type RDSAPI interface { DeleteDBProxyWithContext(aws.Context, *rds.DeleteDBProxyInput, ...request.Option) (*rds.DeleteDBProxyOutput, error) DeleteDBProxyRequest(*rds.DeleteDBProxyInput) (*request.Request, *rds.DeleteDBProxyOutput) + DeleteDBProxyEndpoint(*rds.DeleteDBProxyEndpointInput) (*rds.DeleteDBProxyEndpointOutput, error) + DeleteDBProxyEndpointWithContext(aws.Context, *rds.DeleteDBProxyEndpointInput, ...request.Option) (*rds.DeleteDBProxyEndpointOutput, error) + DeleteDBProxyEndpointRequest(*rds.DeleteDBProxyEndpointInput) (*request.Request, *rds.DeleteDBProxyEndpointOutput) + DeleteDBSecurityGroup(*rds.DeleteDBSecurityGroupInput) (*rds.DeleteDBSecurityGroupOutput, error) DeleteDBSecurityGroupWithContext(aws.Context, *rds.DeleteDBSecurityGroupInput, ...request.Option) 
(*rds.DeleteDBSecurityGroupOutput, error) DeleteDBSecurityGroupRequest(*rds.DeleteDBSecurityGroupInput) (*request.Request, *rds.DeleteDBSecurityGroupOutput) @@ -353,6 +361,13 @@ type RDSAPI interface { DescribeDBProxiesPages(*rds.DescribeDBProxiesInput, func(*rds.DescribeDBProxiesOutput, bool) bool) error DescribeDBProxiesPagesWithContext(aws.Context, *rds.DescribeDBProxiesInput, func(*rds.DescribeDBProxiesOutput, bool) bool, ...request.Option) error + DescribeDBProxyEndpoints(*rds.DescribeDBProxyEndpointsInput) (*rds.DescribeDBProxyEndpointsOutput, error) + DescribeDBProxyEndpointsWithContext(aws.Context, *rds.DescribeDBProxyEndpointsInput, ...request.Option) (*rds.DescribeDBProxyEndpointsOutput, error) + DescribeDBProxyEndpointsRequest(*rds.DescribeDBProxyEndpointsInput) (*request.Request, *rds.DescribeDBProxyEndpointsOutput) + + DescribeDBProxyEndpointsPages(*rds.DescribeDBProxyEndpointsInput, func(*rds.DescribeDBProxyEndpointsOutput, bool) bool) error + DescribeDBProxyEndpointsPagesWithContext(aws.Context, *rds.DescribeDBProxyEndpointsInput, func(*rds.DescribeDBProxyEndpointsOutput, bool) bool, ...request.Option) error + DescribeDBProxyTargetGroups(*rds.DescribeDBProxyTargetGroupsInput) (*rds.DescribeDBProxyTargetGroupsOutput, error) DescribeDBProxyTargetGroupsWithContext(aws.Context, *rds.DescribeDBProxyTargetGroupsInput, ...request.Option) (*rds.DescribeDBProxyTargetGroupsOutput, error) DescribeDBProxyTargetGroupsRequest(*rds.DescribeDBProxyTargetGroupsInput) (*request.Request, *rds.DescribeDBProxyTargetGroupsOutput) @@ -554,6 +569,10 @@ type RDSAPI interface { ModifyDBProxyWithContext(aws.Context, *rds.ModifyDBProxyInput, ...request.Option) (*rds.ModifyDBProxyOutput, error) ModifyDBProxyRequest(*rds.ModifyDBProxyInput) (*request.Request, *rds.ModifyDBProxyOutput) + ModifyDBProxyEndpoint(*rds.ModifyDBProxyEndpointInput) (*rds.ModifyDBProxyEndpointOutput, error) + ModifyDBProxyEndpointWithContext(aws.Context, *rds.ModifyDBProxyEndpointInput, ...request.Option) 
(*rds.ModifyDBProxyEndpointOutput, error) + ModifyDBProxyEndpointRequest(*rds.ModifyDBProxyEndpointInput) (*request.Request, *rds.ModifyDBProxyEndpointOutput) + ModifyDBProxyTargetGroup(*rds.ModifyDBProxyTargetGroupInput) (*rds.ModifyDBProxyTargetGroupOutput, error) ModifyDBProxyTargetGroupWithContext(aws.Context, *rds.ModifyDBProxyTargetGroupInput, ...request.Option) (*rds.ModifyDBProxyTargetGroupOutput, error) ModifyDBProxyTargetGroupRequest(*rds.ModifyDBProxyTargetGroupInput) (*request.Request, *rds.ModifyDBProxyTargetGroupOutput)