From 7175ab4707ffadca4df159738d9af71bbef841c2 Mon Sep 17 00:00:00 2001
From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com>
Date: Fri, 21 Jul 2023 14:49:58 -0400
Subject: [PATCH] Release v1.44.306 (2023-07-21) (#4924)

Release v1.44.306 (2023-07-21)
===

### Service Client Updates
* `service/glue`: Updates service API and documentation
  * This release adds support for AWS Glue Crawler with Apache Hudi Tables, allowing Crawlers to discover Hudi Tables in S3 and register them in Glue Data Catalog for query engines to query against.
* `service/mediaconvert`: Updates service documentation
  * This release includes improvements to Preserve 444 handling, compatibility of HEVC sources without frame rates, and general improvements to MP4 outputs.
* `service/rds`: Updates service API, documentation, waiters, paginators, and examples
  * Adds support for the DBSystemID parameter of CreateDBInstance to RDS Custom for Oracle.
* `service/workspaces`: Updates service documentation
  * Fixed VolumeEncryptionKey descriptions

### SDK Bugs
* `codegen`: Prevent unused imports from being generated for event streams.
  * Potentially-unused `"time"` import was causing vet failures on generated code.
---
 CHANGELOG.md                                  |  17 +
 CHANGELOG_PENDING.md                          |   2 -
 aws/endpoints/defaults.go                     |  13 +-
 aws/version.go                                |   2 +-
 models/apis/glue/2017-03-31/api-2.json        |  16 +-
 models/apis/glue/2017-03-31/docs-2.json       |  28 +-
 .../apis/mediaconvert/2017-08-29/docs-2.json  |   8 +-
 models/apis/rds/2014-10-31/api-2.json         |   6 +-
 models/apis/rds/2014-10-31/docs-2.json        |  46 +-
 models/apis/workspaces/2015-04-08/docs-2.json |   4 +-
 models/endpoints/endpoints.json               |  11 +-
 service/glue/api.go                           | 452 ++++++++++++------
 service/mediaconvert/api.go                   |  32 +-
 service/rds/api.go                            | 215 +++++----
 service/workspaces/api.go                     |   8 +-
 15 files changed, 559 insertions(+), 301 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8b2c71890ce..349fc127fa2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,20 @@
+Release v1.44.306 (2023-07-21)
+===
+
+### Service Client Updates
+* `service/glue`: Updates service API and documentation
+  * This release adds support for AWS Glue Crawler with Apache Hudi Tables, allowing Crawlers to discover Hudi Tables in S3 and register them in Glue Data Catalog for query engines to query against.
+* `service/mediaconvert`: Updates service documentation
+  * This release includes improvements to Preserve 444 handling, compatibility of HEVC sources without frame rates, and general improvements to MP4 outputs.
+* `service/rds`: Updates service API, documentation, waiters, paginators, and examples
+  * Adds support for the DBSystemID parameter of CreateDBInstance to RDS Custom for Oracle.
+* `service/workspaces`: Updates service documentation
+  * Fixed VolumeEncryptionKey descriptions
+
+### SDK Bugs
+* `codegen`: Prevent unused imports from being generated for event streams.
+  * Potentially-unused `"time"` import was causing vet failures on generated code.
+
 Release v1.44.305 (2023-07-20)
 ===
 
diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index ea3f148310e..8a1927a39ca 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -3,5 +3,3 @@
 ### SDK Enhancements
 
 ### SDK Bugs
-* `codegen`: Prevent unused imports from being generated for event streams.
-  * Potentially-unused `"time"` import was causing vet failures on generated code.
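The headline change in this release is Glue crawler support for Apache Hudi tables, exposed through the new `HudiTargets` member of `CrawlerTargets` (see the `HudiTarget` and `HudiTargetList` shapes added to the Glue API model in the diff below). The following is a minimal, illustrative sketch of how that field might be used with this SDK version; the region, crawler name, IAM role ARN, database name, and S3 paths are placeholder assumptions, not values taken from this patch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	// Placeholder region; use whichever region holds your data.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := glue.New(sess)

	// CrawlerTargets gains the HudiTargets field in v1.44.306.
	_, err := svc.CreateCrawler(&glue.CreateCrawlerInput{
		Name:         aws.String("example-hudi-crawler"),      // hypothetical crawler name
		Role:         aws.String("arn:aws:iam::123456789012:role/ExampleGlueCrawlerRole"), // hypothetical role
		DatabaseName: aws.String("example_db"),                // hypothetical catalog database
		Targets: &glue.CrawlerTargets{
			HudiTargets: []*glue.HudiTarget{{
				// Root folders under which Hudi table metadata is discovered.
				Paths:                 aws.StringSlice([]string{"s3://example-bucket/hudi/"}),
				Exclusions:            aws.StringSlice([]string{"**/_temporary/**"}),
				MaximumTraversalDepth: aws.Int64(10),
			}},
		},
	})
	if err != nil {
		log.Fatalf("CreateCrawler failed: %v", err)
	}
	fmt.Println("crawler created; run it with StartCrawler to populate the Data Catalog")
}
```

Once such a crawler runs, the discovered Hudi tables are registered in the Glue Data Catalog, which is the behavior the changelog entry above describes.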
diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go
index c875912ade4..08fd63dd525 100644
--- a/aws/endpoints/defaults.go
+++ b/aws/endpoints/defaults.go
@@ -32598,11 +32598,18 @@ var awscnPartition = partition{
 			},
 		},
 		"savingsplans": service{
-			PartitionEndpoint: "aws-cn",
-			IsRegionalized:    boxedFalse,
+			IsRegionalized: boxedTrue,
 			Endpoints: serviceEndpoints{
 				endpointKey{
-					Region: "aws-cn",
+					Region: "cn-north-1",
+				}: endpoint{
+					Hostname: "savingsplans.cn-north-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-north-1",
+					},
+				},
+				endpointKey{
+					Region: "cn-northwest-1",
 				}: endpoint{
 					Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn",
 					CredentialScope: credentialScope{
diff --git a/aws/version.go b/aws/version.go
index 93a8e5a089d..e1e4aab464d 100644
--- a/aws/version.go
+++ b/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.305"
+const SDKVersion = "1.44.306"
diff --git a/models/apis/glue/2017-03-31/api-2.json b/models/apis/glue/2017-03-31/api-2.json
index 597eeeda19e..b669b43885a 100644
--- a/models/apis/glue/2017-03-31/api-2.json
+++ b/models/apis/glue/2017-03-31/api-2.json
@@ -4816,7 +4816,8 @@
         "DynamoDBTargets":{"shape":"DynamoDBTargetList"},
         "CatalogTargets":{"shape":"CatalogTargetList"},
         "DeltaTargets":{"shape":"DeltaTargetList"},
-        "IcebergTargets":{"shape":"IcebergTargetList"}
+        "IcebergTargets":{"shape":"IcebergTargetList"},
+        "HudiTargets":{"shape":"HudiTargetList"}
       }
     },
     "CrawlsFilter":{
@@ -8182,6 +8183,15 @@
      "min":1,
      "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*"
    },
+    "HudiTarget":{
+      "type":"structure",
+      "members":{
+        "Paths":{"shape":"PathList"},
+        "ConnectionName":{"shape":"ConnectionName"},
+        "Exclusions":{"shape":"PathList"},
+        "MaximumTraversalDepth":{"shape":"NullableInteger"}
+      }
+    },
    "HudiTargetCompressionType":{
      "type":"string",
      "enum":[
        "gzip",
        "lzo",
        "uncompressed",
        "snappy"
      ]
    },
+    "HudiTargetList":{
+      "type":"list",
+      "member":{"shape":"HudiTarget"}
+    },
    "IcebergInput":{
      "type":"structure",
      "required":["MetadataOperation"],
diff --git a/models/apis/glue/2017-03-31/docs-2.json b/models/apis/glue/2017-03-31/docs-2.json
index 45f8db39132..036f1dbf9d7 100644
--- a/models/apis/glue/2017-03-31/docs-2.json
+++ b/models/apis/glue/2017-03-31/docs-2.json
@@ -1404,6 +1404,7 @@
    "refs": {
      "CatalogTarget$ConnectionName": "
The name of the connection for an Amazon S3-backed Data Catalog table to be a target of the crawl when using a Catalog
connection type paired with a NETWORK
Connection type.
The name of the connection to use to connect to the Delta table target.
", + "HudiTarget$ConnectionName": "The name of the connection to use to connect to the Hudi target. If your Hudi files are stored in buckets that require VPC authorization, you can set their connection properties here.
", "IcebergTarget$ConnectionName": "The name of the connection to use to connect to the Iceberg target.
", "JdbcTarget$ConnectionName": "The name of the connection to use to connect to the JDBC target.
", "MongoDBTarget$ConnectionName": "The name of the connection to use to connect to the Amazon DocumentDB or MongoDB target.
", @@ -4312,12 +4313,24 @@ "UpdateMLTransformResponse$TransformId": "The unique identifier for the transform that was updated.
" } }, + "HudiTarget": { + "base": "Specifies an Apache Hudi data source.
", + "refs": { + "HudiTargetList$member": null + } + }, "HudiTargetCompressionType": { "base": null, "refs": { "S3HudiDirectTarget$Compression": "Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\"
and \"bzip\"
).
Specifies Apache Hudi data store targets.
" + } + }, "IcebergInput": { "base": "A structure that defines an Apache Iceberg metadata table to create in the catalog.
", "refs": { @@ -5886,6 +5899,7 @@ "GetDataQualityRulesetEvaluationRunResponse$NumberOfWorkers": "The number of G.1X
workers to be used in the run. The default is 5.
The number of workers of a defined workerType
that are allocated when this task runs.
The maximum number of times to retry a task for this transform after a task run fails.
", + "HudiTarget$MaximumTraversalDepth": "The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run time.
", "IcebergTarget$MaximumTraversalDepth": "The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Iceberg metadata folder in your Amazon S3 path. Used to limit the crawler run time.
", "Job$NumberOfWorkers": "The number of workers of a defined workerType
that are allocated when a job runs.
The number of workers of a defined workerType
that are allocated when a job runs.
A list of the Amazon S3 paths to the Delta tables.
", + "HudiTarget$Paths": "An array of Amazon S3 location strings for Hudi, each indicating the root folder in which the metadata files for a Hudi table reside. The Hudi folder may be located in a child folder of the root folder.
The crawler will scan all folders underneath a path for a Hudi folder.
", + "HudiTarget$Exclusions": "A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.
", "IcebergTarget$Paths": "One or more Amazon S3 paths that contain Iceberg metadata folders as s3://bucket/prefix
.
A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.
", "JdbcTarget$Exclusions": "A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.
", @@ -8568,16 +8584,16 @@ "refs": { "CreateDevEndpointRequest$WorkerType": "The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X
WorkerType
configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
The type of predefined worker that is allocated to the development endpoint. May be a value of Standard, G.1X, or G.2X.
", - "CreateJobRequest$WorkerType": "The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
MaxCapacity
and NumberOfWorkers
must both be at least 1.
The type of predefined worker that is allocated to use for the session. Accepts a value of Standard, G.1X, G.2X, or G.025X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X
WorkerType
configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides a default of 8 Ray workers (1 per vCPU).
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
MaxCapacity
and NumberOfWorkers
must both be at least 1.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
Creates an RDS event notification subscription. This operation requires a topic Amazon Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.
You can specify the type of source (SourceType
) that you want to be notified of and provide a list of RDS sources (SourceIds
) that triggers the events. You can also provide a list of event categories (EventCategories
) for events that you want to be notified of. For example, you can specify SourceType
= db-instance
, SourceIds
= mydbinstance1
, mydbinstance2
and EventCategories
= Availability
, Backup
.
If you specify both the SourceType
and SourceIds
, such as SourceType
= db-instance
and SourceIds
= myDBInstance1
, you are notified of all the db-instance
events for the specified source. If you specify a SourceType
but do not specify SourceIds
, you receive notice of the events for that source type for all your RDS sources. If you don't specify either the SourceType or the SourceIds
, you are notified of events generated from all RDS sources belonging to your customer account.
For more information about subscribing to an event for RDS DB engines, see Subscribing to Amazon RDS event notification in the Amazon RDS User Guide.
For more information about subscribing to an event for Aurora DB engines, see Subscribing to Amazon RDS event notification in the Amazon Aurora User Guide.
", "CreateGlobalCluster": "Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This operation applies only to Aurora DB clusters.
Creates a new option group. You can create up to 20 option groups.
This command doesn't apply to RDS Custom.
", - "DeleteBlueGreenDeployment": "Deletes a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
", + "DeleteBlueGreenDeployment": "Deletes a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
", "DeleteCustomDBEngineVersion": "Deletes a custom engine version. To run this command, make sure you meet the following prerequisites:
The CEV must not be the default for RDS Custom. If it is, change the default before running this command.
The CEV must not be associated with an RDS Custom DB instance, RDS Custom instance snapshot, or automated backup of your RDS Custom instance.
Typically, deletion takes a few minutes.
The MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the DeleteCustomDbEngineVersion
event aren't logged. However, you might see calls from the API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for the DeleteCustomDbEngineVersion
event.
For more information, see Deleting a CEV in the Amazon RDS User Guide.
", "DeleteDBCluster": "The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.
If you're deleting a Multi-AZ DB cluster with read replicas, all cluster members are terminated and read replicas are promoted to standalone instances.
For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.
", "DeleteDBClusterEndpoint": "Deletes a custom endpoint and removes it from an Amazon Aurora DB cluster.
This action only applies to Aurora DB clusters.
Deletes an existing option group.
", "DeregisterDBProxyTargets": "Remove the association between one or more DBProxyTarget
data structures and a DBProxyTargetGroup
.
Lists all of the attributes for a customer account. The attributes include Amazon RDS quotas for the account, such as the number of DB instances allowed. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.
This command doesn't take any parameters.
", - "DescribeBlueGreenDeployments": "Returns information about blue/green deployments.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
", + "DescribeBlueGreenDeployments": "Describes one or more blue/green deployments.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
", "DescribeCertificates": "Lists the set of CA certificates provided by Amazon RDS for this Amazon Web Services account.
For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
", "DescribeDBClusterBacktracks": "Returns information about backtracks for a DB cluster.
For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
This action only applies to Aurora MySQL DB clusters.
Returns information about endpoints for an Amazon Aurora DB cluster.
This action only applies to Aurora DB clusters.
Stops an Amazon Aurora DB cluster. When you stop a DB cluster, Aurora retains the DB cluster's metadata, including its endpoints and DB parameter groups. Aurora also retains the transaction logs so you can do a point-in-time restore if necessary.
For more information, see Stopping and Starting an Aurora Cluster in the Amazon Aurora User Guide.
This action only applies to Aurora DB clusters.
Stops an Amazon RDS DB instance. When you stop a DB instance, Amazon RDS retains the DB instance's metadata, including its endpoint, DB parameter group, and option group membership. Amazon RDS also retains the transaction logs so you can do a point-in-time restore if necessary.
For more information, see Stopping an Amazon RDS DB Instance Temporarily in the Amazon RDS User Guide.
This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL. For Aurora clusters, use StopDBCluster
instead.
Stops automated backup replication for a DB instance.
This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL.
For more information, see Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide.
", - "SwitchoverBlueGreenDeployment": "Switches over a blue/green deployment.
Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
", + "SwitchoverBlueGreenDeployment": "Switches over a blue/green deployment.
Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
", "SwitchoverReadReplica": "Switches over an Oracle standby database in an Oracle Data Guard environment, making it the new primary database. Issue this command in the Region that hosts the current standby database.
" }, "shapes": { @@ -346,7 +346,7 @@ } }, "BlueGreenDeployment": { - "base": "Contains the details about a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
", + "base": "Details about a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
", "refs": { "BlueGreenDeploymentList$member": null, "CreateBlueGreenDeploymentResponse$BlueGreenDeployment": null, @@ -362,16 +362,16 @@ "BlueGreenDeploymentIdentifier": { "base": null, "refs": { - "BlueGreenDeployment$BlueGreenDeploymentIdentifier": "The system-generated identifier of the blue/green deployment.
", - "DeleteBlueGreenDeploymentRequest$BlueGreenDeploymentIdentifier": "The blue/green deployment identifier of the deployment to be deleted. This parameter isn't case-sensitive.
Constraints:
Must match an existing blue/green deployment identifier.
The blue/green deployment identifier. If this parameter is specified, information from only the specific blue/green deployment is returned. This parameter isn't case-sensitive.
Constraints:
If supplied, must match an existing blue/green deployment identifier.
The blue/green deployment identifier.
Constraints:
Must match an existing blue/green deployment identifier.
The unique identifier of the blue/green deployment.
", + "DeleteBlueGreenDeploymentRequest$BlueGreenDeploymentIdentifier": "The unique identifier of the blue/green deployment to delete. This parameter isn't case-sensitive.
Constraints:
Must match an existing blue/green deployment identifier.
The blue/green deployment identifier. If you specify this parameter, the response only includes information about the specific blue/green deployment. This parameter isn't case-sensitive.
Constraints:
Must match an existing blue/green deployment identifier.
The unique identifier of the blue/green deployment.
Constraints:
Must match an existing blue/green deployment identifier.
Contains a list of blue/green deployments for the user.
" + "DescribeBlueGreenDeploymentsResponse$BlueGreenDeployments": "A list of blue/green deployments in the current account and Amazon Web Services Region.
" } }, "BlueGreenDeploymentName": { @@ -389,7 +389,7 @@ "BlueGreenDeploymentStatus": { "base": null, "refs": { - "BlueGreenDeployment$Status": "The status of the blue/green deployment.
Values:
PROVISIONING
- Resources are being created in the green environment.
AVAILABLE
- Resources are available in the green environment.
SWITCHOVER_IN_PROGRESS
- The deployment is being switched from the blue environment to the green environment.
SWITCHOVER_COMPLETED
- Switchover from the blue environment to the green environment is complete.
INVALID_CONFIGURATION
- Resources in the green environment are invalid, so switchover isn't possible.
SWITCHOVER_FAILED
- Switchover was attempted but failed.
DELETING
- The blue/green deployment is being deleted.
The status of the blue/green deployment.
Valid Values:
PROVISIONING
- Resources are being created in the green environment.
AVAILABLE
- Resources are available in the green environment.
SWITCHOVER_IN_PROGRESS
- The deployment is being switched from the blue environment to the green environment.
SWITCHOVER_COMPLETED
- Switchover from the blue environment to the green environment is complete.
INVALID_CONFIGURATION
- Resources in the green environment are invalid, so switchover isn't possible.
SWITCHOVER_FAILED
- Switchover was attempted but failed.
DELETING
- The blue/green deployment is being deleted.
Contains the details about a task for a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
", + "base": "Details about a task for a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
", "refs": { "BlueGreenDeploymentTaskList$member": null } @@ -419,7 +419,7 @@ "BlueGreenDeploymentTaskStatus": { "base": null, "refs": { - "BlueGreenDeploymentTask$Status": "The status of the blue/green deployment task.
Values:
PENDING
- The resources are being prepared for deployment.
IN_PROGRESS
- The resource is being deployed.
COMPLETED
- The resource has been deployed.
FAILED
- Deployment of the resource failed.
The status of the blue/green deployment task.
Valid Values:
PENDING
- The resource is being prepared for deployment.
IN_PROGRESS
- The resource is being deployed.
COMPLETED
- The resource has been deployed.
FAILED
- Deployment of the resource failed.
Indicates whether Performance Insights is enabled for the DB instance.
", "DBInstance$CustomerOwnedIpEnabled": "Indicates whether a customer-owned IP address (CoIP) is enabled for an RDS on Outposts DB instance.
A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.
For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.
For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.
", "DBInstance$ActivityStreamEngineNativeAuditFieldsIncluded": "Indicates whether engine-native audit fields are included in the database activity stream.
", - "DeleteBlueGreenDeploymentRequest$DeleteTarget": "A value that indicates whether to delete the resources in the green environment. You can't specify this option if the blue/green deployment status is SWITCHOVER_COMPLETED
.
Specifies whether to delete the resources in the green environment. You can't specify this option if the blue/green deployment status is SWITCHOVER_COMPLETED
.
A value that indicates whether to remove automated backups immediately after the DB instance is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB instance is deleted.
", "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "A value that indicates whether to list the supported character sets for each engine version.
If this parameter is enabled and the requested engine supports the CharacterSetName
parameter for CreateDBInstance
, the response includes a list of supported character sets for each engine version.
For RDS Custom, the default is not to list supported character sets. If you set ListSupportedCharacterSets
to true
, RDS Custom returns no results.
A value that indicates whether to list the supported time zones for each engine version.
If this parameter is enabled and the requested engine supports the TimeZone
parameter for CreateDBInstance
, the response includes a list of supported time zones for each engine version.
For RDS Custom, the default is not to list supported time zones. If you set ListSupportedTimezones
to true
, RDS Custom returns no results.
A filter that specifies one or more blue/green deployments to describe.
Supported filters:
blue-green-deployment-identifier
- Accepts system-generated identifiers for blue/green deployments. The results list only includes information about the blue/green deployments with the specified identifiers.
blue-green-deployment-name
- Accepts user-supplied names for blue/green deployments. The results list only includes information about the blue/green deployments with the specified names.
source
- Accepts source databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified source databases.
target
- Accepts target databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified target databases.
A filter that specifies one or more blue/green deployments to describe.
Valid Values:
blue-green-deployment-identifier
- Accepts system-generated identifiers for blue/green deployments. The results list only includes information about the blue/green deployments with the specified identifiers.
blue-green-deployment-name
- Accepts user-supplied names for blue/green deployments. The results list only includes information about the blue/green deployments with the specified names.
source
- Accepts source databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified source databases.
target
- Accepts target databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified target databases.
This parameter isn't currently supported.
", "DescribeDBClusterBacktracksMessage$Filters": "A filter that specifies one or more DB clusters to describe. Supported filters include the following:
db-cluster-backtrack-id
- Accepts backtrack identifiers. The results list includes information about only the backtracks identified by these identifiers.
db-cluster-backtrack-status
- Accepts any of the following backtrack status values:
applying
completed
failed
pending
The results list includes information about only the backtracks identified by these values.
A set of name-value pairs that define which endpoints to include in the output. The filters are specified as name-value pairs, in the format Name=endpoint_type,Values=endpoint_type1,endpoint_type2,...
. Name
can be one of: db-cluster-endpoint-type
, db-cluster-endpoint-custom-type
, db-cluster-endpoint-id
, db-cluster-endpoint-status
. Values
for the db-cluster-endpoint-type
filter can be one or more of: reader
, writer
, custom
. Values
for the db-cluster-endpoint-custom-type
filter can be one or more of: reader
, any
. Values
for the db-cluster-endpoint-status
filter can be one or more of: available
, creating
, deleting
, inactive
, modifying
.
The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
", + "DescribeBlueGreenDeploymentsRequest$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
Constraints:
Must be a minimum of 20.
Can't exceed 100.
The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.
", "DescribeDBProxyEndpointsRequest$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.
", "DescribeDBProxyTargetGroupsRequest$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.
", @@ -4031,7 +4031,7 @@ "CreateDBClusterParameterGroupMessage$Description": "The description for the DB cluster parameter group.
", "CreateDBClusterSnapshotMessage$DBClusterSnapshotIdentifier": "The identifier of the DB cluster snapshot. This parameter is stored as a lowercase string.
Constraints:
Must contain from 1 to 63 letters, numbers, or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
Example: my-cluster1-snapshot1
The identifier of the DB cluster to create a snapshot for. This parameter isn't case-sensitive.
Constraints:
Must match the identifier of an existing DBCluster.
Example: my-cluster1
The meaning of this parameter differs depending on the database engine.
The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB cluster.
Constraints:
Must contain 1 to 64 alphanumeric characters.
Can't be a word reserved by the database engine.
The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created.
Default: postgres
Constraints:
Must contain 1 to 63 alphanumeric characters.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).
Can't be a word reserved by the database engine.
The Oracle System ID (SID) of the created RDS Custom DB instance.
Default: ORCL
Constraints:
Must contain 1 to 8 alphanumeric characters.
Must contain a letter.
Can't be a word reserved by the database engine.
Not applicable. Must be null.
The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
The Oracle System ID (SID) of the created DB instance.
Default: ORCL
Constraints:
Can't be longer than 8 characters.
Can't be a word reserved by the database engine, such as the string NULL.
The name of the database to create when the DB instance is created.
Default: postgres
Constraints:
Must contain 1 to 63 letters, numbers, or underscores.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
Not applicable. Must be null.
The meaning of this parameter differs according to the database engine you use.
MySQL
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine
MariaDB
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine
PostgreSQL
The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance.
Constraints:
Must contain 1 to 63 letters, numbers, or underscores.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine
Oracle
The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName.
Default: ORCL
Constraints:
Can't be longer than 8 characters
Amazon RDS Custom for Oracle
The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs.
Default: ORCL
Constraints:
It must contain 1 to 8 alphanumeric characters.
It must contain a letter.
It can't be a word reserved by the database engine.
Amazon RDS Custom for SQL Server
Not applicable. Must be null.
SQL Server
Not applicable. Must be null.
Amazon Aurora MySQL
The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.
Constraints:
It must contain 1 to 64 alphanumeric characters.
It can't be a word reserved by the database engine.
Amazon Aurora PostgreSQL
The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster.
Constraints:
It must contain 1 to 63 alphanumeric characters.
It must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).
It can't be a word reserved by the database engine.
The identifier for this DB instance. This parameter is stored as a lowercase string.
Constraints:
Must contain from 1 to 63 letters, numbers, or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
Example: mydbinstance
The compute and memory capacity of the DB instance, for example db.m5.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide.
The database engine to use for this DB instance.
Not every database engine is available in every Amazon Web Services Region.
Valid Values:
aurora-mysql (for Aurora MySQL DB instances)
aurora-postgresql (for Aurora PostgreSQL DB instances)
custom-oracle-ee (for RDS Custom for Oracle DB instances)
custom-oracle-ee-cdb (for RDS Custom for Oracle DB instances)
custom-sqlserver-ee (for RDS Custom for SQL Server DB instances)
custom-sqlserver-se (for RDS Custom for SQL Server DB instances)
custom-sqlserver-web (for RDS Custom for SQL Server DB instances)
mariadb
mysql
oracle-ee
oracle-ee-cdb
oracle-se2
oracle-se2-cdb
postgres
sqlserver-ee
sqlserver-se
sqlserver-ex
sqlserver-web
The network type of the DB instance.
The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).
For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.
Valid Values: IPV4 | DUAL
The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.
This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB instance.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.
If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.
There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.
", "CreateDBInstanceMessage$CACertificateIdentifier": "The CA certificate identifier to use for the DB instance's server certificate.
This setting doesn't apply to RDS Custom DB instances.
For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
", + "CreateDBInstanceMessage$DBSystemId": "The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. In this context, the term \"Oracle database instance\" refers exclusively to the system global area (SGA) and Oracle background processes. If you don't specify a SID, the value defaults to RDSCDB
. The Oracle SID is also the name of your CDB.
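To make the new DBSystemId parameter concrete, here is a hedged sketch of creating an RDS Custom for Oracle CDB instance with an explicit SID. All identifiers and credentials are placeholders, and the additional settings that RDS Custom requires (custom engine version, instance profile, KMS key, networking) are omitted.

```go
// Sketch: CreateDBInstance for RDS Custom for Oracle with an explicit Oracle
// SID via DBSystemId. Values are placeholders; required RDS Custom settings
// (CustomIamInstanceProfile, KmsKeyId, engine version, networking) are omitted.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

func createCustomOracleCDB(svc *rds.RDS) error {
	_, err := svc.CreateDBInstance(&rds.CreateDBInstanceInput{
		DBInstanceIdentifier: aws.String("my-custom-oracle"),
		DBInstanceClass:      aws.String("db.m5.large"),
		Engine:               aws.String("custom-oracle-ee-cdb"),
		AllocatedStorage:     aws.Int64(100),
		MasterUsername:       aws.String("admin"),
		MasterUserPassword:   aws.String("REPLACE_ME"),
		DBSystemId:           aws.String("MYCDB"), // defaults to RDSCDB when omitted
	})
	return err
}
```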
The DB instance identifier of the read replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string.
", "CreateDBInstanceReadReplicaMessage$SourceDBInstanceIdentifier": "The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to 15 read replicas, with the exception of Oracle and SQL Server, which can have up to five.
Constraints:
Must be the identifier of an existing MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server DB instance.
Can't be specified if the SourceDBClusterIdentifier parameter is also specified.
For the limitations of Oracle read replicas, see Version and licensing considerations for RDS for Oracle replicas in the Amazon RDS User Guide.
For the limitations of SQL Server read replicas, see Read replica limitations with SQL Server in the Amazon RDS User Guide.
The specified DB instance must have automatic backups enabled, that is, its backup retention period must be greater than 0.
If the source DB instance is in the same Amazon Web Services Region as the read replica, specify a valid DB instance identifier.
If the source DB instance is in a different Amazon Web Services Region from the read replica, specify a valid DB instance ARN. For more information, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS Custom, which don't support cross-Region replicas.
The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.
Default: Inherits from the source DB instance.
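A minimal sketch of the read replica creation described above; both identifiers are placeholders, and DBInstanceClass is left unset so it inherits from the source:

```go
// Sketch: create a read replica of an existing DB instance in the same Region.
// Identifiers are placeholders; DBInstanceClass is omitted so it inherits
// from the source instance.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

func createReplica(svc *rds.RDS) error {
	_, err := svc.CreateDBInstanceReadReplica(&rds.CreateDBInstanceReadReplicaInput{
		DBInstanceIdentifier:       aws.String("mydbinstance-replica1"),
		SourceDBInstanceIdentifier: aws.String("mydbinstance"),
	})
	return err
}
```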
", @@ -4205,7 +4206,7 @@ "DBInstance$Engine": "The database engine used for this DB instance.
", "DBInstance$DBInstanceStatus": "The current state of this database.
For information about DB instance statuses, see Viewing DB instance status in the Amazon RDS User Guide.
", "DBInstance$MasterUsername": "The master username for the DB instance.
", - "DBInstance$DBName": "The meaning of this parameter differs depending on the database engine.
For RDS for MariaDB, Microsoft SQL Server, MySQL, and PostgreSQL - The name of the initial database specified for this DB instance when it was created, if one was provided. This same name is returned for the life of the DB instance.
For RDS for Oracle - The Oracle System ID (SID) of the created DB instance. This value is only returned when the object returned is an Oracle DB instance.
Contains the initial database name that you provided (if required) when you created the DB instance. This name is returned for the life of your DB instance. For an RDS for Oracle CDB instance, the name identifies the PDB rather than the CDB.
", "DBInstance$PreferredBackupWindow": "The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod
.
The name of the Availability Zone where the DB instance is located.
", "DBInstance$PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
", @@ -4318,6 +4319,7 @@ "DBSnapshot$Timezone": "The time zone of the DB snapshot. In most cases, the Timezone
element is empty. Timezone content appears only for snapshots taken from Microsoft SQL Server DB instances that were created with a time zone specified.
The identifier for the source DB instance, which can't be changed and which is unique to an Amazon Web Services Region.
", "DBSnapshot$SnapshotTarget": "Specifies where manual snapshots are stored: Amazon Web Services Outposts or the Amazon Web Services Region.
", + "DBSnapshot$DBSystemId": "The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. The Oracle SID is also the name of your CDB.
", "DBSnapshotAttribute$AttributeName": "The name of the manual DB snapshot attribute.
The attribute named restore refers to the list of Amazon Web Services accounts that have permission to copy or restore the manual DB cluster snapshot. For more information, see the ModifyDBSnapshotAttribute API action.
The identifier of the manual DB snapshot that the attributes apply to.
", "DBSnapshotMessage$Marker": "An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
The name of the option group to be deleted.
You can't delete default option groups.
The identifier of the DBProxy that is associated with the DBProxyTargetGroup.
The identifier of the DBProxyTargetGroup.
An optional pagination token provided by a previous DescribeBlueGreenDeployments request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
A pagination token that can be used in a later DescribeBlueGreenDeployments request.
", + "DescribeBlueGreenDeploymentsRequest$Marker": "An optional pagination token provided by a previous DescribeBlueGreenDeployments
request. If you specify this parameter, the response only includes records beyond the marker, up to the value specified by MaxRecords.
A pagination token that can be used in a later DescribeBlueGreenDeployments request.
The user-supplied certificate identifier. If this parameter is specified, information for only the identified certificate is returned. This parameter isn't case-sensitive.
Constraints:
Must match an existing CertificateIdentifier.
An optional pagination token provided by a previous DescribeCertificates request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
The DB cluster identifier of the DB cluster to be described. This parameter is stored as a lowercase string.
Constraints:
Must contain from 1 to 63 alphanumeric characters or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
Example: my-cluster1
The amount of time, in seconds, for the switchover to complete. The default is 300.
If the switchover takes longer than the specified duration, then any changes are rolled back, and no changes are made to the environments.
" + "SwitchoverBlueGreenDeploymentRequest$SwitchoverTimeout": "The amount of time, in seconds, for the switchover to complete.
Default: 300
If the switchover takes longer than the specified duration, then any changes are rolled back, and no changes are made to the environments.
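A short sketch of starting the switchover with a non-default timeout; the deployment identifier is a placeholder:

```go
// Sketch: switch over a blue/green deployment, allowing up to 10 minutes
// before the operation rolls back. The identifier is a placeholder.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

func switchover(svc *rds.RDS) error {
	_, err := svc.SwitchoverBlueGreenDeployment(&rds.SwitchoverBlueGreenDeploymentInput{
		BlueGreenDeploymentIdentifier: aws.String("bgd-EXAMPLE1234"),
		SwitchoverTimeout:             aws.Int64(600),
	})
	return err
}
```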
" } }, "TStamp": { "base": null, "refs": { "BacktrackDBClusterMessage$BacktrackTo": "The timestamp of the time to backtrack the DB cluster to, specified in ISO 8601 format. For more information about ISO 8601, see the ISO8601 Wikipedia page.
If the specified time isn't a consistent time for the DB cluster, Aurora automatically chooses the nearest possible consistent time for the DB cluster.
Constraints:
Must contain a valid ISO 8601 timestamp.
Can't contain a timestamp set in the future.
Example: 2017-07-08T18:00Z
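For the BacktrackTo timestamp format described above, a sketch of backtracking an Aurora MySQL cluster to a fixed UTC point; the cluster identifier and time are placeholders:

```go
// Sketch: backtrack an Aurora MySQL DB cluster to a specific UTC time.
// The cluster identifier and timestamp are placeholders.
package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

func backtrack(svc *rds.RDS) error {
	_, err := svc.BacktrackDBCluster(&rds.BacktrackDBClusterInput{
		DBClusterIdentifier: aws.String("my-cluster1"),
		BacktrackTo:         aws.Time(time.Date(2017, 7, 8, 18, 0, 0, 0, time.UTC)),
	})
	return err
}
```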
Specifies the time when the blue/green deployment was created, in Universal Coordinated Time (UTC).
", - "BlueGreenDeployment$DeleteTime": "Specifies the time when the blue/green deployment was deleted, in Universal Coordinated Time (UTC).
", + "BlueGreenDeployment$CreateTime": "The time when the blue/green deployment was created, in Universal Coordinated Time (UTC).
", + "BlueGreenDeployment$DeleteTime": "The time when the blue/green deployment was deleted, in Universal Coordinated Time (UTC).
", "Certificate$ValidFrom": "The starting date from which the certificate is valid.
", "Certificate$ValidTill": "The final date that the certificate continues to be valid.
", "Certificate$CustomerOverrideValidTill": "If there is an override for the default certificate identifier, when the override expires.
", diff --git a/models/apis/workspaces/2015-04-08/docs-2.json b/models/apis/workspaces/2015-04-08/docs-2.json index f475ad10914..f2027ba5bbf 100644 --- a/models/apis/workspaces/2015-04-08/docs-2.json +++ b/models/apis/workspaces/2015-04-08/docs-2.json @@ -1998,8 +1998,8 @@ "base": null, "refs": { "StandbyWorkspace$VolumeEncryptionKey": "The volume encryption key of the standby WorkSpace.
", - "Workspace$VolumeEncryptionKey": "The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
", - "WorkspaceRequest$VolumeEncryptionKey": "The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
" + "Workspace$VolumeEncryptionKey": "The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
", + "WorkspaceRequest$VolumeEncryptionKey": "The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
" } }, "Workspace": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 5062218fc05..a5cf935e9d5 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -18942,15 +18942,20 @@ }, "savingsplans" : { "endpoints" : { - "aws-cn" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "savingsplans.cn-north-1.amazonaws.com.cn" + }, + "cn-northwest-1" : { "credentialScope" : { "region" : "cn-northwest-1" }, "hostname" : "savingsplans.cn-northwest-1.amazonaws.com.cn" } }, - "isRegionalized" : false, - "partitionEndpoint" : "aws-cn" + "isRegionalized" : true }, "secretsmanager" : { "endpoints" : { diff --git a/service/glue/api.go b/service/glue/api.go index bf85555406d..921bd192393 100644 --- a/service/glue/api.go +++ b/service/glue/api.go @@ -29080,6 +29080,9 @@ type CrawlerTargets struct { // Specifies Amazon DynamoDB targets. DynamoDBTargets []*DynamoDBTarget `type:"list"` + // Specifies Apache Hudi data store targets. + HudiTargets []*HudiTarget `type:"list"` + // Specifies Apache Iceberg data store targets. IcebergTargets []*IcebergTarget `type:"list"` @@ -29149,6 +29152,12 @@ func (s *CrawlerTargets) SetDynamoDBTargets(v []*DynamoDBTarget) *CrawlerTargets return s } +// SetHudiTargets sets the HudiTargets field's value. +func (s *CrawlerTargets) SetHudiTargets(v []*HudiTarget) *CrawlerTargets { + s.HudiTargets = v + return s +} + // SetIcebergTargets sets the IcebergTargets field's value. func (s *CrawlerTargets) SetIcebergTargets(v []*IcebergTarget) *CrawlerTargets { s.IcebergTargets = v @@ -30980,28 +30989,48 @@ type CreateJobInput struct { Timeout *int64 `min:"1" type:"integer"` // The type of predefined worker that is allocated when a job runs. Accepts - // a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value + // a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value // Z.2X for Ray jobs. // - // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 50GB disk, and 2 executors per worker. - // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of - // memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. - // - // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of - // memory, 128 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. - // - // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 - // GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for low volume streaming jobs. This worker type is only - // available for Glue version 3.0 streaming jobs. - // - // * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB - // of m emory, 128 GB disk), and provides up to 8 Ray workers based on the - // autoscaler. + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB + // of memory) with 84GB disk (approximately 34GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB + // of memory) with 128GB disk (approximately 77GB free), and provides 1 executor + // per worker. 
We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB + // of memory) with 256GB disk (approximately 235GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs in the following Amazon Web Services Regions: US East (Ohio), + // US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia + // Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), + // Europe (Ireland), and Europe (Stockholm). + // + // * For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB + // of memory) with 512GB disk (approximately 487GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs, in the same Amazon Web Services Regions as supported for the + // G.4X worker type. + // + // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 + // GB of memory) with 84GB disk (approximately 34GB free), and provides 1 + // executor per worker. We recommend this worker type for low volume streaming + // jobs. This worker type is only available for Glue version 3.0 streaming + // jobs. + // + // * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB + // of memory) with 128 GB disk (approximately 120GB free), and provides up + // to 8 Ray workers based on the autoscaler. WorkerType *string `type:"string" enum:"WorkerType"` } @@ -32564,24 +32593,43 @@ type CreateSessionInput struct { // Consult the documentation for other job types. Timeout *int64 `min:"1" type:"integer"` - // The type of predefined worker that is allocated to use for the session. Accepts - // a value of Standard, G.1X, G.2X, or G.025X. - // - // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 50GB disk, and 2 executors per worker. - // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of - // memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. - // - // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of - // memory, 128 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. - // - // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 - // GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for low volume streaming jobs. This worker type is only - // available for Glue version 3.0 streaming jobs. + // The type of predefined worker that is allocated when a job runs. Accepts + // a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X + // for Ray notebooks. + // + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB + // of memory) with 84GB disk (approximately 34GB free), and provides 1 executor + // per worker. 
We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB + // of memory) with 128GB disk (approximately 77GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB + // of memory) with 256GB disk (approximately 235GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs in the following Amazon Web Services Regions: US East (Ohio), + // US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia + // Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), + // Europe (Ireland), and Europe (Stockholm). + // + // * For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB + // of memory) with 512GB disk (approximately 487GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs, in the same Amazon Web Services Regions as supported for the + // G.4X worker type. + // + // * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB + // of memory) with 128 GB disk (approximately 120GB free), and provides up + // to 8 Ray workers based on the autoscaler. WorkerType *string `type:"string" enum:"WorkerType"` } @@ -49529,6 +49577,74 @@ func (s *GrokClassifier) SetVersion(v int64) *GrokClassifier { return s } +// Specifies an Apache Hudi data source. +type HudiTarget struct { + _ struct{} `type:"structure"` + + // The name of the connection to use to connect to the Hudi target. If your + // Hudi files are stored in buckets that require VPC authorization, you can + // set their connection properties here. + ConnectionName *string `type:"string"` + + // A list of glob patterns used to exclude from the crawl. For more information, + // see Catalog Tables with a Crawler (https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). + Exclusions []*string `type:"list"` + + // The maximum depth of Amazon S3 paths that the crawler can traverse to discover + // the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler + // run time. + MaximumTraversalDepth *int64 `type:"integer"` + + // An array of Amazon S3 location strings for Hudi, each indicating the root + // folder with which the metadata files for a Hudi table resides. The Hudi folder + // may be located in a child folder of the root folder. + // + // The crawler will scan all folders underneath a path for a Hudi folder. + Paths []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HudiTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HudiTarget) GoString() string { + return s.String() +} + +// SetConnectionName sets the ConnectionName field's value. +func (s *HudiTarget) SetConnectionName(v string) *HudiTarget { + s.ConnectionName = &v + return s +} + +// SetExclusions sets the Exclusions field's value. +func (s *HudiTarget) SetExclusions(v []*string) *HudiTarget { + s.Exclusions = v + return s +} + +// SetMaximumTraversalDepth sets the MaximumTraversalDepth field's value. +func (s *HudiTarget) SetMaximumTraversalDepth(v int64) *HudiTarget { + s.MaximumTraversalDepth = &v + return s +} + +// SetPaths sets the Paths field's value. +func (s *HudiTarget) SetPaths(v []*string) *HudiTarget { + s.Paths = v + return s +} + // A structure that defines an Apache Iceberg metadata table to create in the // catalog. type IcebergInput_ struct { @@ -50834,46 +50950,48 @@ type Job struct { Timeout *int64 `min:"1" type:"integer"` // The type of predefined worker that is allocated when a job runs. Accepts - // a value of Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts - // the value Z.2X for Ray jobs. - // - // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 50GB disk, and 2 executors per worker. - // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of - // memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for workloads such as data transforms, joins, and queries, - // to offers a scalable and cost effective way to run most jobs. + // a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value + // Z.2X for Ray jobs. // - // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of - // memory, 128 GB disk), and provides 1 executor per worker. We recommend - // this worker type for workloads such as data transforms, joins, and queries, - // to offers a scalable and cost effective way to run most jobs. - // - // * For the G.4X worker type, each worker maps to 4 DPU (16 vCPU, 64 GB - // of memory, 256 GB disk), and provides 1 executor per worker. We recommend - // this worker type for jobs whose workloads contain your most demanding - // transforms, aggregations, joins, and queries. This worker type is available - // only for Glue version 3.0 or later Spark ETL jobs in the following Amazon - // Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), - // Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), - // Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - // - // * For the G.8X worker type, each worker maps to 8 DPU (32 vCPU, 128 GB - // of memory, 512 GB disk), and provides 1 executor per worker. We recommend - // this worker type for jobs whose workloads contain your most demanding - // transforms, aggregations, joins, and queries. This worker type is available - // only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon - // Web Services Regions as supported for the G.4X worker type. - // - // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 - // GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for low volume streaming jobs. This worker type is only - // available for Glue version 3.0 streaming jobs. 
- // - // * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB - // of m emory, 128 GB disk), and provides a default of 8 Ray workers (1 per - // vCPU). + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB + // of memory) with 84GB disk (approximately 34GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB + // of memory) with 128GB disk (approximately 77GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB + // of memory) with 256GB disk (approximately 235GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs in the following Amazon Web Services Regions: US East (Ohio), + // US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia + // Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), + // Europe (Ireland), and Europe (Stockholm). + // + // * For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB + // of memory) with 512GB disk (approximately 487GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs, in the same Amazon Web Services Regions as supported for the + // G.4X worker type. + // + // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 + // GB of memory) with 84GB disk (approximately 34GB free), and provides 1 + // executor per worker. We recommend this worker type for low volume streaming + // jobs. This worker type is only available for Glue version 3.0 streaming + // jobs. + // + // * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB + // of memory) with 128 GB disk (approximately 120GB free), and provides up + // to 8 Ray workers based on the autoscaler. WorkerType *string `type:"string" enum:"WorkerType"` } @@ -51421,28 +51539,48 @@ type JobRun struct { TriggerName *string `min:"1" type:"string"` // The type of predefined worker that is allocated when a job runs. Accepts - // a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value + // a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value // Z.2X for Ray jobs. // - // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 50GB disk, and 2 executors per worker. - // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of - // memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. - // - // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of - // memory, 128 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. 
- // - // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 - // GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for low volume streaming jobs. This worker type is only - // available for Glue version 3.0 streaming jobs. - // - // * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB - // of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) - // based on the autoscaler. + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB + // of memory) with 84GB disk (approximately 34GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB + // of memory) with 128GB disk (approximately 77GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB + // of memory) with 256GB disk (approximately 235GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs in the following Amazon Web Services Regions: US East (Ohio), + // US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia + // Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), + // Europe (Ireland), and Europe (Stockholm). + // + // * For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB + // of memory) with 512GB disk (approximately 487GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs, in the same Amazon Web Services Regions as supported for the + // G.4X worker type. + // + // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 + // GB of memory) with 84GB disk (approximately 34GB free), and provides 1 + // executor per worker. We recommend this worker type for low volume streaming + // jobs. This worker type is only available for Glue version 3.0 streaming + // jobs. + // + // * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB + // of memory) with 128 GB disk (approximately 120GB free), and provides up + // to 8 Ray workers based on the autoscaler. WorkerType *string `type:"string" enum:"WorkerType"` } @@ -51752,28 +51890,48 @@ type JobUpdate struct { Timeout *int64 `min:"1" type:"integer"` // The type of predefined worker that is allocated when a job runs. Accepts - // a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value + // a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value // Z.2X for Ray jobs. // - // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 50GB disk, and 2 executors per worker. - // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of - // memory, 64 GB disk), and provides 1 executor per worker. 
We recommend - // this worker type for memory-intensive jobs. - // - // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of - // memory, 128 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. - // - // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 - // GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for low volume streaming jobs. This worker type is only - // available for Glue version 3.0 streaming jobs. - // - // * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB - // of m emory, 128 GB disk), and provides up to 8 Ray workers based on the - // autoscaler. + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB + // of memory) with 84GB disk (approximately 34GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB + // of memory) with 128GB disk (approximately 77GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB + // of memory) with 256GB disk (approximately 235GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs in the following Amazon Web Services Regions: US East (Ohio), + // US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia + // Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), + // Europe (Ireland), and Europe (Stockholm). + // + // * For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB + // of memory) with 512GB disk (approximately 487GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs, in the same Amazon Web Services Regions as supported for the + // G.4X worker type. + // + // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 + // GB of memory) with 84GB disk (approximately 34GB free), and provides 1 + // executor per worker. We recommend this worker type for low volume streaming + // jobs. This worker type is only available for Glue version 3.0 streaming + // jobs. + // + // * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB + // of memory) with 128 GB disk (approximately 120GB free), and provides up + // to 8 Ray workers based on the autoscaler. WorkerType *string `type:"string" enum:"WorkerType"` } @@ -65482,28 +65640,48 @@ type StartJobRunInput struct { Timeout *int64 `min:"1" type:"integer"` // The type of predefined worker that is allocated when a job runs. Accepts - // a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value + // a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value // Z.2X for Ray jobs. 
// - // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 50GB disk, and 2 executors per worker. - // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of - // memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. - // - // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of - // memory, 128 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. - // - // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 - // GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for low volume streaming jobs. This worker type is only - // available for Glue version 3.0 streaming jobs. - // - // * For the Z.2X worker type, each worker maps to 2 DPU (8vCPU, 64 GB of - // m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) - // based on the autoscaler. + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB + // of memory) with 84GB disk (approximately 34GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB + // of memory) with 128GB disk (approximately 77GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offers a scalable and cost effective way to run + // most jobs. + // + // * For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB + // of memory) with 256GB disk (approximately 235GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs in the following Amazon Web Services Regions: US East (Ohio), + // US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia + // Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), + // Europe (Ireland), and Europe (Stockholm). + // + // * For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB + // of memory) with 512GB disk (approximately 487GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs, in the same Amazon Web Services Regions as supported for the + // G.4X worker type. + // + // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 + // GB of memory) with 84GB disk (approximately 34GB free), and provides 1 + // executor per worker. We recommend this worker type for low volume streaming + // jobs. This worker type is only available for Glue version 3.0 streaming + // jobs. + // + // * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB + // of memory) with 128 GB disk (approximately 120GB free), and provides up + // to 8 Ray workers based on the autoscaler. 
WorkerType *string `type:"string" enum:"WorkerType"` } diff --git a/service/mediaconvert/api.go b/service/mediaconvert/api.go index 67e7fef9651..02e0627d0e2 100644 --- a/service/mediaconvert/api.go +++ b/service/mediaconvert/api.go @@ -22303,16 +22303,12 @@ type ProresSettings struct { // you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 // sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma // sampling. You must specify a value for this setting when your output codec - // profile supports 4:4:4 chroma sampling. Related Settings: When you set Chroma - // sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose - // an output codec profile that supports 4:4:4 chroma sampling. These values - // for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444 - // (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When - // you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all - // video preprocessors except for Nexguard file marker (PartnerWatermarking). - // When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate - // conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm) - // to Drop duplicate (DUPLICATE_DROP). + // profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes + // outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when + // your input has 4:4:4 chroma sampling and your output codec Profile is Apple + // ProRes 4444 or 4444 XQ. Note that when you choose Preserve 4:4:4 sampling, + // you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, + // or Noise reducer. ChromaSampling *string `locationName:"chromaSampling" type:"string" enum:"ProresChromaSampling"` // Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec @@ -36468,16 +36464,12 @@ func PricingPlan_Values() []string { // you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 // sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma // sampling. You must specify a value for this setting when your output codec -// profile supports 4:4:4 chroma sampling. Related Settings: When you set Chroma -// sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose -// an output codec profile that supports 4:4:4 chroma sampling. These values -// for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444 -// (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When -// you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all -// video preprocessors except for Nexguard file marker (PartnerWatermarking). -// When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate -// conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm) -// to Drop duplicate (DUPLICATE_DROP). +// profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes +// outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when +// your input has 4:4:4 chroma sampling and your output codec Profile is Apple +// ProRes 4444 or 4444 XQ. Note that when you choose Preserve 4:4:4 sampling, +// you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, +// or Noise reducer. 
const ( // ProresChromaSamplingPreserve444Sampling is a ProresChromaSampling enum value ProresChromaSamplingPreserve444Sampling = "PRESERVE_444_SAMPLING" diff --git a/service/rds/api.go b/service/rds/api.go index 50e854dc9bb..a935f88fdb5 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -4917,7 +4917,7 @@ func (c *RDS) DescribeBlueGreenDeploymentsRequest(input *DescribeBlueGreenDeploy // DescribeBlueGreenDeployments API operation for Amazon Relational Database Service. // -// Returns information about blue/green deployments. +// Describes one or more blue/green deployments. // // For more information, see Using Amazon RDS Blue/Green Deployments for database // updates (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/blue-green-deployments.html) @@ -16658,7 +16658,7 @@ func (s *BacktrackDBClusterOutput) SetStatus(v string) *BacktrackDBClusterOutput return s } -// Contains the details about a blue/green deployment. +// Details about a blue/green deployment. // // For more information, see Using Amazon RDS Blue/Green Deployments for database // updates (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/blue-green-deployments.html) @@ -16668,18 +16668,18 @@ func (s *BacktrackDBClusterOutput) SetStatus(v string) *BacktrackDBClusterOutput type BlueGreenDeployment struct { _ struct{} `type:"structure"` - // The system-generated identifier of the blue/green deployment. + // The unique identifier of the blue/green deployment. BlueGreenDeploymentIdentifier *string `min:"1" type:"string"` // The user-supplied name of the blue/green deployment. BlueGreenDeploymentName *string `min:"1" type:"string"` - // Specifies the time when the blue/green deployment was created, in Universal - // Coordinated Time (UTC). + // The time when the blue/green deployment was created, in Universal Coordinated + // Time (UTC). CreateTime *time.Time `type:"timestamp"` - // Specifies the time when the blue/green deployment was deleted, in Universal - // Coordinated Time (UTC). + // The time when the blue/green deployment was deleted, in Universal Coordinated + // Time (UTC). DeleteTime *time.Time `type:"timestamp"` // The source database for the blue/green deployment. @@ -16690,7 +16690,7 @@ type BlueGreenDeployment struct { // The status of the blue/green deployment. // - // Values: + // Valid Values: // // * PROVISIONING - Resources are being created in the green environment. // @@ -16815,7 +16815,7 @@ func (s *BlueGreenDeployment) SetTasks(v []*BlueGreenDeploymentTask) *BlueGreenD return s } -// Contains the details about a task for a blue/green deployment. +// Details about a task for a blue/green deployment. // // For more information, see Using Amazon RDS Blue/Green Deployments for database // updates (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/blue-green-deployments.html) @@ -16830,9 +16830,9 @@ type BlueGreenDeploymentTask struct { // The status of the blue/green deployment task. // - // Values: + // Valid Values: // - // * PENDING - The resources are being prepared for deployment. + // * PENDING - The resource is being prepared for deployment. // // * IN_PROGRESS - The resource is being deployed. // @@ -18746,7 +18746,7 @@ func (s *CreateBlueGreenDeploymentInput) SetTargetEngineVersion(v string) *Creat type CreateBlueGreenDeploymentOutput struct { _ struct{} `type:"structure"` - // Contains the details about a blue/green deployment. + // Details about a blue/green deployment. 
// // For more information, see Using Amazon RDS Blue/Green Deployments for database // updates (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/blue-green-deployments.html) @@ -21150,113 +21150,115 @@ type CreateDBInstanceInput struct { // DBInstanceIdentifier is a required field DBInstanceIdentifier *string `type:"string" required:"true"` - // The meaning of this parameter differs depending on the database engine. + // The meaning of this parameter differs according to the database engine you + // use. // - // Amazon Aurora MySQL + // MySQL // - // The name of the database to create when the primary DB instance of the Aurora - // MySQL DB cluster is created. If you don't specify a value, Amazon RDS doesn't - // create a database in the DB cluster. + // The name of the database to create when the DB instance is created. If this + // parameter isn't specified, no database is created in the DB instance. // // Constraints: // - // * Must contain 1 to 64 alphanumeric characters. + // * Must contain 1 to 64 letters or numbers. // - // * Can't be a word reserved by the database engine. + // * Must begin with a letter. Subsequent characters can be letters, underscores, + // or digits (0-9). // - // Amazon Aurora PostgreSQL + // * Can't be a word reserved by the specified database engine // - // The name of the database to create when the primary DB instance of the Aurora - // PostgreSQL DB cluster is created. + // MariaDB // - // Default: postgres + // The name of the database to create when the DB instance is created. If this + // parameter isn't specified, no database is created in the DB instance. // // Constraints: // - // * Must contain 1 to 63 alphanumeric characters. + // * Must contain 1 to 64 letters or numbers. // // * Must begin with a letter. Subsequent characters can be letters, underscores, - // or digits (0 to 9). - // - // * Can't be a word reserved by the database engine. + // or digits (0-9). // - // Amazon RDS Custom for Oracle + // * Can't be a word reserved by the specified database engine // - // The Oracle System ID (SID) of the created RDS Custom DB instance. + // PostgreSQL // - // Default: ORCL + // The name of the database to create when the DB instance is created. If this + // parameter isn't specified, a database named postgres is created in the DB + // instance. // // Constraints: // - // * Must contain 1 to 8 alphanumeric characters. - // - // * Must contain a letter. + // * Must contain 1 to 63 letters, numbers, or underscores. // - // * Can't be a word reserved by the database engine. + // * Must begin with a letter. Subsequent characters can be letters, underscores, + // or digits (0-9). // - // Amazon RDS Custom for SQL Server + // * Can't be a word reserved by the specified database engine // - // Not applicable. Must be null. + // Oracle // - // RDS for MariaDB + // The Oracle System ID (SID) of the created DB instance. If you don't specify + // a value, the default value is ORCL. You can't specify the string null, or + // any other reserved word, for DBName. // - // The name of the database to create when the DB instance is created. If you - // don't specify a value, Amazon RDS doesn't create a database in the DB instance. + // Default: ORCL // // Constraints: // - // * Must contain 1 to 64 letters or numbers. - // - // * Must begin with a letter. Subsequent characters can be letters, underscores, - // or digits (0-9). + // * Can't be longer than 8 characters // - // * Can't be a word reserved by the database engine. 
+ // Amazon RDS Custom for Oracle // - // RDS for MySQL + // The Oracle System ID (SID) of the created RDS Custom DB instance. If you + // don't specify a value, the default value is ORCL for non-CDBs and RDSCDB + // for CDBs. // - // The name of the database to create when the DB instance is created. If you - // don't specify a value, Amazon RDS doesn't create a database in the DB instance. + // Default: ORCL // // Constraints: // - // * Must contain 1 to 64 letters or numbers. + // * It must contain 1 to 8 alphanumeric characters. // - // * Must begin with a letter. Subsequent characters can be letters, underscores, - // or digits (0-9). + // * It must contain a letter. // - // * Can't be a word reserved by the database engine. + // * It can't be a word reserved by the database engine. // - // RDS for Oracle + // Amazon RDS Custom for SQL Server // - // The Oracle System ID (SID) of the created DB instance. + // Not applicable. Must be null. // - // Default: ORCL + // SQL Server // - // Constraints: + // Not applicable. Must be null. // - // * Can't be longer than 8 characters. + // Amazon Aurora MySQL // - // * Can't be a word reserved by the database engine, such as the string - // NULL. + // The name of the database to create when the primary DB instance of the Aurora + // MySQL DB cluster is created. If this parameter isn't specified for an Aurora + // MySQL DB cluster, no database is created in the DB cluster. // - // RDS for PostgreSQL + // Constraints: // - // The name of the database to create when the DB instance is created. + // * It must contain 1 to 64 alphanumeric characters. // - // Default: postgres + // * It can't be a word reserved by the database engine. // - // Constraints: + // Amazon Aurora PostgreSQL // - // * Must contain 1 to 63 letters, numbers, or underscores. + // The name of the database to create when the primary DB instance of the Aurora + // PostgreSQL DB cluster is created. If this parameter isn't specified for an + // Aurora PostgreSQL DB cluster, a database named postgres is created in the + // DB cluster. // - // * Must begin with a letter. Subsequent characters can be letters, underscores, - // or digits (0-9). + // Constraints: // - // * Can't be a word reserved by the database engine. + // * It must contain 1 to 63 alphanumeric characters. // - // RDS for SQL Server + // * It must begin with a letter. Subsequent characters can be letters, underscores, + // or digits (0 to 9). // - // Not applicable. Must be null. + // * It can't be a word reserved by the database engine. DBName *string `type:"string"` // The name of the DB parameter group to associate with this DB instance. If @@ -21291,6 +21293,13 @@ type CreateDBInstanceInput struct { // Example: mydbsubnetgroup DBSubnetGroupName *string `type:"string"` + // The Oracle system identifier (SID), which is the name of the Oracle database + // instance that manages your database files. In this context, the term "Oracle + // database instance" refers exclusively to the system global area (SGA) and + // Oracle background processes. If you don't specify a SID, the value defaults + // to RDSCDB. The Oracle SID is also the name of your CDB. + DBSystemId *string `type:"string"` + // Specifies whether the DB instance has deletion protection enabled. The database // can't be deleted when deletion protection is enabled. By default, deletion // protection isn't enabled. 
For more information, see Deleting a DB Instance @@ -22067,6 +22076,12 @@ func (s *CreateDBInstanceInput) SetDBSubnetGroupName(v string) *CreateDBInstance return s } +// SetDBSystemId sets the DBSystemId field's value. +func (s *CreateDBInstanceInput) SetDBSystemId(v string) *CreateDBInstanceInput { + s.DBSystemId = &v + return s +} + // SetDeletionProtection sets the DeletionProtection field's value. func (s *CreateDBInstanceInput) SetDeletionProtection(v bool) *CreateDBInstanceInput { s.DeletionProtection = &v @@ -26757,15 +26772,10 @@ type DBInstance struct { // in the Amazon RDS User Guide. DBInstanceStatus *string `type:"string"` - // The meaning of this parameter differs depending on the database engine. - // - // * For RDS for MariaDB, Microsoft SQL Server, MySQL, and PostgreSQL - The - // name of the initial database specified for this DB instance when it was - // created, if one was provided. This same name is returned for the life - // of the DB instance. - // - // * For RDS for Oracle - The Oracle System ID (SID) of the created DB instance. - // This value is only returned when the object returned is an Oracle DB instance. + // Contains the initial database name that you provided (if required) when you + // created the DB instance. This name is returned for the life of your DB instance. + // For an RDS for Oracle CDB instance, the name identifies the PDB rather than + // the CDB. DBName *string `type:"string"` // The list of DB parameter groups applied to this DB instance. @@ -28852,6 +28862,11 @@ type DBSnapshot struct { // Specifies the identifier for the DB snapshot. DBSnapshotIdentifier *string `type:"string"` + // The Oracle system identifier (SID), which is the name of the Oracle database + // instance that manages your database files. The Oracle SID is also the name + // of your CDB. + DBSystemId *string `type:"string"` + // The identifier for the source DB instance, which can't be changed and which // is unique to an Amazon Web Services Region. DbiResourceId *string `type:"string"` @@ -29012,6 +29027,12 @@ func (s *DBSnapshot) SetDBSnapshotIdentifier(v string) *DBSnapshot { return s } +// SetDBSystemId sets the DBSystemId field's value. +func (s *DBSnapshot) SetDBSystemId(v string) *DBSnapshot { + s.DBSystemId = &v + return s +} + // SetDbiResourceId sets the DbiResourceId field's value. func (s *DBSnapshot) SetDbiResourceId(v string) *DBSnapshot { s.DbiResourceId = &v @@ -29385,8 +29406,8 @@ func (s *DBSubnetGroup) SetVpcId(v string) *DBSubnetGroup { type DeleteBlueGreenDeploymentInput struct { _ struct{} `type:"structure"` - // The blue/green deployment identifier of the deployment to be deleted. This - // parameter isn't case-sensitive. + // The unique identifier of the blue/green deployment to delete. This parameter + // isn't case-sensitive. // // Constraints: // @@ -29395,8 +29416,8 @@ type DeleteBlueGreenDeploymentInput struct { // BlueGreenDeploymentIdentifier is a required field BlueGreenDeploymentIdentifier *string `min:"1" type:"string" required:"true"` - // A value that indicates whether to delete the resources in the green environment. - // You can't specify this option if the blue/green deployment status (https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_BlueGreenDeployment.html) + // Specifies whether to delete the resources in the green environment. You can't + // specify this option if the blue/green deployment status (https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_BlueGreenDeployment.html) // is SWITCHOVER_COMPLETED. 
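The `DBSystemId` field and `SetDBSystemId` setter added above are the SDK surface for the RDS Custom for Oracle support called out in this release. The following is a minimal, hedged sketch of how a caller might pass the parameter; the instance identifier, class, engine name, credentials, and SID are invented placeholders, and the extra prerequisites RDS Custom actually requires (such as a custom engine version and an instance profile) are omitted for brevity.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	// Build an RDS client from the default session configuration.
	sess := session.Must(session.NewSession())
	svc := rds.New(sess)

	// Illustrative values only; not taken from this changelog.
	input := &rds.CreateDBInstanceInput{
		DBInstanceIdentifier: aws.String("my-custom-oracle-instance"),
		DBInstanceClass:      aws.String("db.m5.large"),
		Engine:               aws.String("custom-oracle-ee"),
		AllocatedStorage:     aws.Int64(100),
		MasterUsername:       aws.String("admin"),
		MasterUserPassword:   aws.String("replace-with-a-real-password"),
		// New in this release: the Oracle SID of the CDB that RDS Custom
		// for Oracle creates. If omitted, it defaults to RDSCDB.
		DBSystemId: aws.String("MYCDB"),
	}

	out, err := svc.CreateDBInstance(input)
	if err != nil {
		fmt.Println("CreateDBInstance failed:", err)
		return
	}
	fmt.Println("requested instance:", aws.StringValue(out.DBInstance.DBInstanceIdentifier))
}
```

The generated `SetDBSystemId` setter shown in this diff can be used instead of the struct literal when chaining field assignments.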
DeleteTarget *bool `type:"boolean"` } @@ -29450,7 +29471,7 @@ func (s *DeleteBlueGreenDeploymentInput) SetDeleteTarget(v bool) *DeleteBlueGree type DeleteBlueGreenDeploymentOutput struct { _ struct{} `type:"structure"` - // Contains the details about a blue/green deployment. + // Details about a blue/green deployment. // // For more information, see Using Amazon RDS Blue/Green Deployments for database // updates (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/blue-green-deployments.html) @@ -31448,18 +31469,18 @@ func (s *DescribeAccountAttributesOutput) SetAccountQuotas(v []*AccountQuota) *D type DescribeBlueGreenDeploymentsInput struct { _ struct{} `type:"structure"` - // The blue/green deployment identifier. If this parameter is specified, information - // from only the specific blue/green deployment is returned. This parameter - // isn't case-sensitive. + // The blue/green deployment identifier. If you specify this parameter, the + // response only includes information about the specific blue/green deployment. + // This parameter isn't case-sensitive. // // Constraints: // - // * If supplied, must match an existing blue/green deployment identifier. + // * Must match an existing blue/green deployment identifier. BlueGreenDeploymentIdentifier *string `min:"1" type:"string"` // A filter that specifies one or more blue/green deployments to describe. // - // Supported filters: + // Valid Values: // // * blue-green-deployment-identifier - Accepts system-generated identifiers // for blue/green deployments. The results list only includes information @@ -31479,7 +31500,7 @@ type DescribeBlueGreenDeploymentsInput struct { Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeBlueGreenDeployments - // request. If this parameter is specified, the response includes only records + // request. If you specify this parameter, the response only includes records // beyond the marker, up to the value specified by MaxRecords. Marker *string `type:"string"` @@ -31489,7 +31510,11 @@ type DescribeBlueGreenDeploymentsInput struct { // // Default: 100 // - // Constraints: Minimum 20, maximum 100. + // Constraints: + // + // * Must be a minimum of 20. + // + // * Can't exceed 100. MaxRecords *int64 `min:"20" type:"integer"` } @@ -31564,7 +31589,8 @@ func (s *DescribeBlueGreenDeploymentsInput) SetMaxRecords(v int64) *DescribeBlue type DescribeBlueGreenDeploymentsOutput struct { _ struct{} `type:"structure"` - // Contains a list of blue/green deployments for the user. + // A list of blue/green deployments in the current account and Amazon Web Services + // Region. BlueGreenDeployments []*BlueGreenDeployment `type:"list"` // A pagination token that can be used in a later DescribeBlueGreenDeployments @@ -53234,7 +53260,7 @@ func (s *Subnet) SetSubnetStatus(v string) *Subnet { type SwitchoverBlueGreenDeploymentInput struct { _ struct{} `type:"structure"` - // The blue/green deployment identifier. + // The unique identifier of the blue/green deployment. // // Constraints: // @@ -53243,8 +53269,9 @@ type SwitchoverBlueGreenDeploymentInput struct { // BlueGreenDeploymentIdentifier is a required field BlueGreenDeploymentIdentifier *string `min:"1" type:"string" required:"true"` - // The amount of time, in seconds, for the switchover to complete. The default - // is 300. + // The amount of time, in seconds, for the switchover to complete. 
+ // + // Default: 300 // // If the switchover takes longer than the specified duration, then any changes // are rolled back, and no changes are made to the environments. @@ -53303,7 +53330,7 @@ func (s *SwitchoverBlueGreenDeploymentInput) SetSwitchoverTimeout(v int64) *Swit type SwitchoverBlueGreenDeploymentOutput struct { _ struct{} `type:"structure"` - // Contains the details about a blue/green deployment. + // Details about a blue/green deployment. // // For more information, see Using Amazon RDS Blue/Green Deployments for database // updates (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/blue-green-deployments.html) diff --git a/service/workspaces/api.go b/service/workspaces/api.go index e9869ef852d..200473d4582 100644 --- a/service/workspaces/api.go +++ b/service/workspaces/api.go @@ -15895,8 +15895,8 @@ type Workspace struct { // Indicates whether the data stored on the user volume is encrypted. UserVolumeEncryptionEnabled *bool `type:"boolean"` - // The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon - // WorkSpaces does not support asymmetric KMS keys. + // The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace. + // Amazon WorkSpaces does not support asymmetric KMS keys. VolumeEncryptionKey *string `type:"string"` // The identifier of the WorkSpace. @@ -16872,8 +16872,8 @@ type WorkspaceRequest struct { // Indicates whether the data stored on the user volume is encrypted. UserVolumeEncryptionEnabled *bool `type:"boolean"` - // The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon - // WorkSpaces does not support asymmetric KMS keys. + // The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace. + // Amazon WorkSpaces does not support asymmetric KMS keys. VolumeEncryptionKey *string `type:"string"` // The WorkSpace properties.
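The blue/green deployment documentation revised above (DescribeBlueGreenDeployments, SwitchoverBlueGreenDeployment, and the 300-second default for SwitchoverTimeout) can be exercised as in the sketch below. The deployment identifier is a placeholder, and the 600-second timeout is simply an arbitrary override of the default; use values from your own account.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := rds.New(sess)

	// Placeholder identifier; use the value returned by CreateBlueGreenDeployment.
	id := "bgd-examplebgd1234"

	// Describe the deployment first; passing the identifier limits the
	// response to that single deployment.
	desc, err := svc.DescribeBlueGreenDeployments(&rds.DescribeBlueGreenDeploymentsInput{
		BlueGreenDeploymentIdentifier: aws.String(id),
	})
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	for _, d := range desc.BlueGreenDeployments {
		fmt.Println("status:", aws.StringValue(d.Status))
	}

	// Switch over, allowing up to 600 seconds instead of the 300-second
	// default. If the switchover exceeds the timeout, changes are rolled back.
	out, err := svc.SwitchoverBlueGreenDeployment(&rds.SwitchoverBlueGreenDeploymentInput{
		BlueGreenDeploymentIdentifier: aws.String(id),
		SwitchoverTimeout:             aws.Int64(600),
	})
	if err != nil {
		fmt.Println("switchover failed:", err)
		return
	}
	fmt.Println("switchover status:", aws.StringValue(out.BlueGreenDeployment.Status))
}
```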
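For the WorkSpaces change, the clarified VolumeEncryptionKey description means callers supply the ARN of a symmetric KMS key rather than any other key reference. Below is a hedged sketch of a CreateWorkspaces call using that field; the directory, bundle, user, and key ARN values are invented placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := workspaces.New(sess)

	// All identifiers and the KMS key ARN below are placeholders.
	req := &workspaces.WorkspaceRequest{
		DirectoryId: aws.String("d-9067008fxx"),
		UserName:    aws.String("jdoe"),
		BundleId:    aws.String("wsb-exampleid123"),
		// Per the clarified description, this is the ARN of a symmetric
		// KMS key; asymmetric KMS keys are not supported.
		VolumeEncryptionKey:         aws.String("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		RootVolumeEncryptionEnabled: aws.Bool(true),
		UserVolumeEncryptionEnabled: aws.Bool(true),
	}

	out, err := svc.CreateWorkspaces(&workspaces.CreateWorkspacesInput{
		Workspaces: []*workspaces.WorkspaceRequest{req},
	})
	if err != nil {
		fmt.Println("CreateWorkspaces failed:", err)
		return
	}
	fmt.Println("pending workspaces:", len(out.PendingRequests))
}
```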