From bc7b9163e31bf733f67ea4c8271710c8511d4fe1 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 9 Jul 2024 18:08:12 +0000 Subject: [PATCH 1/3] Update to latest models --- .../api-change-datazone-24284.json | 5 + .../next-release/api-change-fsx-63480.json | 5 + .../api-change-opensearch-6019.json | 5 + .../api-change-sagemaker-55808.json | 5 + .../data/datazone/2018-05-10/service-2.json | 14 +- botocore/data/fsx/2018-03-01/service-2.json | 48 +- .../data/opensearch/2021-01-01/service-2.json | 90 +++ .../sagemaker/2017-07-24/paginators-1.json | 6 + .../data/sagemaker/2017-07-24/service-2.json | 632 +++++++++++++++++- 9 files changed, 787 insertions(+), 23 deletions(-) create mode 100644 .changes/next-release/api-change-datazone-24284.json create mode 100644 .changes/next-release/api-change-fsx-63480.json create mode 100644 .changes/next-release/api-change-opensearch-6019.json create mode 100644 .changes/next-release/api-change-sagemaker-55808.json diff --git a/.changes/next-release/api-change-datazone-24284.json b/.changes/next-release/api-change-datazone-24284.json new file mode 100644 index 0000000000..cb63ed8eee --- /dev/null +++ b/.changes/next-release/api-change-datazone-24284.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``datazone``", + "description": "This release deprecates dataProductItem field from SearchInventoryResultItem, along with some unused DataProduct shapes" +} diff --git a/.changes/next-release/api-change-fsx-63480.json b/.changes/next-release/api-change-fsx-63480.json new file mode 100644 index 0000000000..8edc84d0e4 --- /dev/null +++ b/.changes/next-release/api-change-fsx-63480.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``fsx``", + "description": "Adds support for FSx for NetApp ONTAP 2nd Generation file systems, and FSx for OpenZFS Single AZ HA file systems." 
+} diff --git a/.changes/next-release/api-change-opensearch-6019.json b/.changes/next-release/api-change-opensearch-6019.json new file mode 100644 index 0000000000..861c639b06 --- /dev/null +++ b/.changes/next-release/api-change-opensearch-6019.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``opensearch``", + "description": "This release adds support for enabling or disabling Natural Language Query Processing feature for Amazon OpenSearch Service domains, and provides visibility into the current state of the setup or tear-down." +} diff --git a/.changes/next-release/api-change-sagemaker-55808.json b/.changes/next-release/api-change-sagemaker-55808.json new file mode 100644 index 0000000000..2fa5b19186 --- /dev/null +++ b/.changes/next-release/api-change-sagemaker-55808.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``sagemaker``", + "description": "This release 1/ enables optimization jobs that allows customers to perform Ahead-of-time compilation and quantization. 2/ allows customers to control access to Amazon Q integration in SageMaker Studio. 3/ enables AdditionalModelDataSources for CreateModel action." +} diff --git a/botocore/data/datazone/2018-05-10/service-2.json b/botocore/data/datazone/2018-05-10/service-2.json index 97a617d4dd..a2ac24a9af 100644 --- a/botocore/data/datazone/2018-05-10/service-2.json +++ b/botocore/data/datazone/2018-05-10/service-2.json @@ -5104,11 +5104,15 @@ "documentation":"

" } }, - "documentation":"

" + "documentation":"

", + "deprecated":true, + "deprecatedMessage":"This structure is deprecated." }, "DataProductItems":{ "type":"list", "member":{"shape":"DataProductItem"}, + "deprecated":true, + "deprecatedMessage":"This structure is deprecated.", "max":100, "min":0 }, @@ -5172,7 +5176,9 @@ "documentation":"

" } }, - "documentation":"

" + "documentation":"

", + "deprecated":true, + "deprecatedMessage":"This structure is deprecated." }, "DataSourceConfigurationInput":{ "type":"structure", @@ -12161,7 +12167,9 @@ }, "dataProductItem":{ "shape":"DataProductSummary", - "documentation":"

The data product item included in the search results.

" + "documentation":"

The data product item included in the search results.

", + "deprecated":true, + "deprecatedMessage":"This field is deprecated." }, "glossaryItem":{ "shape":"GlossaryItem", diff --git a/botocore/data/fsx/2018-03-01/service-2.json b/botocore/data/fsx/2018-03-01/service-2.json index 3d15bca4b3..16d7dda7a4 100644 --- a/botocore/data/fsx/2018-03-01/service-2.json +++ b/botocore/data/fsx/2018-03-01/service-2.json @@ -11,7 +11,8 @@ "signatureVersion":"v4", "signingName":"fsx", "targetPrefix":"AWSSimbaAPIService_v20180301", - "uid":"fsx-2018-03-01" + "uid":"fsx-2018-03-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateFileSystemAliases":{ @@ -876,7 +877,7 @@ "AdministrativeActionType":{"shape":"AdministrativeActionType"}, "ProgressPercent":{ "shape":"ProgressPercent", - "documentation":"

The percentage-complete status of a STORAGE_OPTIMIZATION administrative action. Does not apply to any other administrative action type.

" + "documentation":"

The percentage-complete status of a STORAGE_OPTIMIZATION or DOWNLOAD_DATA_FROM_BACKUP administrative action. Does not apply to any other administrative action type.

" }, "RequestTime":{ "shape":"RequestTime", @@ -884,7 +885,7 @@ }, "Status":{ "shape":"Status", - "documentation":"

The status of the administrative action, as follows:

" + "documentation":"

The status of the administrative action, as follows:

" }, "TargetFileSystemValues":{ "shape":"FileSystem", @@ -916,7 +917,7 @@ }, "AdministrativeActionType":{ "type":"string", - "documentation":"

Describes the type of administrative action, as follows:

", + "documentation":"

Describes the type of administrative action, as follows:

", "enum":[ "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", @@ -931,7 +932,8 @@ "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", - "VOLUME_INITIALIZE_WITH_SNAPSHOT" + "VOLUME_INITIALIZE_WITH_SNAPSHOT", + "DOWNLOAD_DATA_FROM_BACKUP" ] }, "AdministrativeActions":{ @@ -950,7 +952,7 @@ "members":{ "Aggregates":{ "shape":"Aggregates", - "documentation":"

The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" + "documentation":"

The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" }, "TotalConstituents":{ "shape":"TotalConstituents", @@ -1784,7 +1786,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OntapDeploymentType", - "documentation":"

Specifies the FSx for ONTAP file system deployment type to use in creating the file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type.

" + "documentation":"

Specifies the FSx for ONTAP file system deployment type to use in creating the file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type.

" }, "EndpointIpAddressRange":{ "shape":"IpAddressRange", @@ -1800,7 +1802,7 @@ }, "PreferredSubnetId":{ "shape":"SubnetId", - "documentation":"

Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located.

" + "documentation":"

Required when DeploymentType is set to MULTI_AZ_1 or MULTI_AZ_2. This specifies the subnet in which you want the preferred file server to be located.

" }, "RouteTableIds":{ "shape":"RouteTableIds", @@ -1813,11 +1815,11 @@ "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "HAPairs":{ "shape":"HAPairs", - "documentation":"

Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" + "documentation":"

Specifies how many high-availability (HA) pairs of file servers will power your file system. First-generation file systems are powered by 1 HA pair. Second-generation multi-AZ file systems are powered by 1 HA pair. Second-generation single-AZ file systems are powered by up to 12 HA pairs. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see Using block storage protocols.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" }, "ThroughputCapacityPerHAPair":{ "shape":"ThroughputCapacityPerHAPair", - "documentation":"

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity when creating a file system, but not both.

This field and ThroughputCapacity are the same for scale-up file systems powered by one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" + "documentation":"

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity when creating a file system, but not both.

This field and ThroughputCapacity are the same for file systems powered by one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" } }, "documentation":"

The ONTAP configuration properties of the FSx for ONTAP file system that you are creating.

" @@ -1841,7 +1843,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OpenZFSDeploymentType", - "documentation":"

Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:

For more information, see Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

" + "documentation":"

Specifies the file system deployment type. Valid values are the following:

For a list of which Amazon Web Services Regions each deployment type is available in, see Deployment type availability. For more information on the differences in performance between deployment types, see File system performance in the Amazon FSx for OpenZFS User Guide.

" }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", @@ -3460,7 +3462,7 @@ }, "FailureDetails":{ "shape":"FileCacheFailureDetails", - "documentation":"

A structure providing details of any failures that occurred.

" + "documentation":"

A structure providing details of any failures that occurred in creating a cache.

" }, "StorageCapacity":{ "shape":"StorageCapacity", @@ -4374,7 +4376,8 @@ "enum":[ "MULTI_AZ_1", "SINGLE_AZ_1", - "SINGLE_AZ_2" + "SINGLE_AZ_2", + "MULTI_AZ_2" ] }, "OntapEndpointIpAddresses":{ @@ -4390,7 +4393,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OntapDeploymentType", - "documentation":"

Specifies the FSx for ONTAP file system deployment type in use in the file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment.

" + "documentation":"

Specifies the FSx for ONTAP file system deployment type in use in the file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment.

" }, "EndpointIpAddressRange":{ "shape":"IpAddressRange", @@ -4417,11 +4420,11 @@ }, "HAPairs":{ "shape":"HAPairs", - "documentation":"

Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" + "documentation":"

Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" }, "ThroughputCapacityPerHAPair":{ "shape":"ThroughputCapacityPerHAPair", - "documentation":"

Use to choose the throughput capacity per HA pair. When the value of HAPairs is equal to 1, the value of ThroughputCapacityPerHAPair is the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" + "documentation":"

Use to choose the throughput capacity per HA pair. When the value of HAPairs is equal to 1, the value of ThroughputCapacityPerHAPair is the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" } }, "documentation":"

Configuration for the FSx for NetApp ONTAP file system.

" @@ -4584,6 +4587,8 @@ "enum":[ "SINGLE_AZ_1", "SINGLE_AZ_2", + "SINGLE_AZ_HA_1", + "SINGLE_AZ_HA_2", "MULTI_AZ_1" ] }, @@ -4602,7 +4607,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OpenZFSDeploymentType", - "documentation":"

Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
 MULTI_AZ_1, SINGLE_AZ_1, and SINGLE_AZ_2.

" + "documentation":"

Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
 MULTI_AZ_1, SINGLE_AZ_HA_2, SINGLE_AZ_HA_1, SINGLE_AZ_2, and SINGLE_AZ_1.

" }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", @@ -5370,7 +5375,8 @@ "IN_PROGRESS", "PENDING", "COMPLETED", - "UPDATED_OPTIMIZING" + "UPDATED_OPTIMIZING", + "OPTIMIZING" ] }, "StorageCapacity":{ @@ -5894,7 +5900,11 @@ }, "ThroughputCapacityPerHAPair":{ "shape":"ThroughputCapacityPerHAPair", - "documentation":"

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" + "documentation":"

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" + }, + "HAPairs":{ + "shape":"HAPairs", + "documentation":"

Use to update the number of high-availability (HA) pairs for a second-generation single-AZ file system. If you increase the number of HA pairs for your file system, you must specify proportional increases for StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see Using block storage protocols.

" } }, "documentation":"

The configuration updates for an Amazon FSx for NetApp ONTAP file system.

" diff --git a/botocore/data/opensearch/2021-01-01/service-2.json b/botocore/data/opensearch/2021-01-01/service-2.json index 4414ddf9a9..733048f4c9 100644 --- a/botocore/data/opensearch/2021-01-01/service-2.json +++ b/botocore/data/opensearch/2021-01-01/service-2.json @@ -1063,6 +1063,37 @@ } }, "shapes":{ + "AIMLOptionsInput":{ + "type":"structure", + "members":{ + "NaturalLanguageQueryGenerationOptions":{ + "shape":"NaturalLanguageQueryGenerationOptionsInput", + "documentation":"

Container for parameters required for natural language query generation on the specified domain.

" + } + }, + "documentation":"

Container for parameters required to enable all machine learning features.

" + }, + "AIMLOptionsOutput":{ + "type":"structure", + "members":{ + "NaturalLanguageQueryGenerationOptions":{ + "shape":"NaturalLanguageQueryGenerationOptionsOutput", + "documentation":"

Container for parameters required for natural language query generation on the specified domain.

" + } + }, + "documentation":"

Container for parameters representing the state of machine learning features on the specified domain.

" + }, + "AIMLOptionsStatus":{ + "type":"structure", + "members":{ + "Options":{ + "shape":"AIMLOptionsOutput", + "documentation":"

Machine learning options on the specified domain.

" + }, + "Status":{"shape":"OptionStatus"} + }, + "documentation":"

The status of machine learning options on the specified domain.

" + }, "ARN":{ "type":"string", "documentation":"

The Amazon Resource Name (ARN) of the domain. See Identifiers for IAM Entities in Using Amazon Web Services Identity and Access Management for more information.

", @@ -2155,6 +2186,10 @@ "SoftwareUpdateOptions":{ "shape":"SoftwareUpdateOptions", "documentation":"

Software update options for the domain.

" + }, + "AIMLOptions":{ + "shape":"AIMLOptionsInput", + "documentation":"

Options for all machine learning features for the specified domain.

" } } }, @@ -3207,6 +3242,10 @@ "ModifyingProperties":{ "shape":"ModifyingPropertiesList", "documentation":"

Information about the domain properties that are currently being modified.

" + }, + "AIMLOptions":{ + "shape":"AIMLOptionsStatus", + "documentation":"

Container for parameters required to enable all machine learning features.

" } }, "documentation":"

Container for the configuration of an OpenSearch Service domain.

" @@ -3614,6 +3653,10 @@ "ModifyingProperties":{ "shape":"ModifyingPropertiesList", "documentation":"

Information about the domain properties that are currently being modified.

" + }, + "AIMLOptions":{ + "shape":"AIMLOptionsOutput", + "documentation":"

Container for parameters required to enable all machine learning features.

" } }, "documentation":"

The current status of an OpenSearch Service domain.

" @@ -4934,6 +4977,49 @@ "type":"list", "member":{"shape":"ModifyingProperties"} }, + "NaturalLanguageQueryGenerationCurrentState":{ + "type":"string", + "enum":[ + "NOT_ENABLED", + "ENABLE_COMPLETE", + "ENABLE_IN_PROGRESS", + "ENABLE_FAILED", + "DISABLE_COMPLETE", + "DISABLE_IN_PROGRESS", + "DISABLE_FAILED" + ] + }, + "NaturalLanguageQueryGenerationDesiredState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "NaturalLanguageQueryGenerationOptionsInput":{ + "type":"structure", + "members":{ + "DesiredState":{ + "shape":"NaturalLanguageQueryGenerationDesiredState", + "documentation":"

The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.

" + } + }, + "documentation":"

Container for parameters required to enable the natural language query generation feature.

" + }, + "NaturalLanguageQueryGenerationOptionsOutput":{ + "type":"structure", + "members":{ + "DesiredState":{ + "shape":"NaturalLanguageQueryGenerationDesiredState", + "documentation":"

The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.

" + }, + "CurrentState":{ + "shape":"NaturalLanguageQueryGenerationCurrentState", + "documentation":"

The current state of the natural language query generation feature, indicating completion, in progress, or failure.

" + } + }, + "documentation":"

Container for parameters representing the state of the natural language query generation feature on the specified domain.

" + }, "NextToken":{ "type":"string", "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" @@ -6421,6 +6507,10 @@ "SoftwareUpdateOptions":{ "shape":"SoftwareUpdateOptions", "documentation":"

Service software update options for the domain.

" + }, + "AIMLOptions":{ + "shape":"AIMLOptionsInput", + "documentation":"

Options for all machine learning features for the specified domain.

" } }, "documentation":"

Container for the request parameters to the UpdateDomain operation.

" diff --git a/botocore/data/sagemaker/2017-07-24/paginators-1.json b/botocore/data/sagemaker/2017-07-24/paginators-1.json index a80076cda1..d12fd9c57d 100644 --- a/botocore/data/sagemaker/2017-07-24/paginators-1.json +++ b/botocore/data/sagemaker/2017-07-24/paginators-1.json @@ -437,6 +437,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "TrackingServerSummaries" + }, + "ListOptimizationJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "OptimizationJobSummaries" } } } diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 24d4b60108..e56dc5afc2 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -656,6 +656,20 @@ ], "documentation":"

Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.

Each lifecycle configuration script has a limit of 16384 characters.

The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin.

View Amazon CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

" }, + "CreateOptimizationJob":{ + "name":"CreateOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptimizationJobRequest"}, + "output":{"shape":"CreateOptimizationJobResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates a job that optimizes a model for inference performance. To create the job, you provide the location of a source model, and you provide the settings for the optimization techniques that you want the job to apply. When the job completes successfully, SageMaker uploads the new optimized model to the output destination that you specify.

For more information about how to use this action, and about the supported optimization techniques, see Optimize model inference with Amazon SageMaker.

" + }, "CreatePipeline":{ "name":"CreatePipeline", "http":{ @@ -1344,6 +1358,18 @@ "input":{"shape":"DeleteNotebookInstanceLifecycleConfigInput"}, "documentation":"

Deletes a notebook instance lifecycle configuration.

" }, + "DeleteOptimizationJob":{ + "name":"DeleteOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptimizationJobRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes an optimization job.

" + }, "DeletePipeline":{ "name":"DeletePipeline", "http":{ @@ -2058,6 +2084,19 @@ "output":{"shape":"DescribeNotebookInstanceLifecycleConfigOutput"}, "documentation":"

Returns a description of a notebook instance lifecycle configuration.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

" }, + "DescribeOptimizationJob":{ + "name":"DescribeOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptimizationJobRequest"}, + "output":{"shape":"DescribeOptimizationJobResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Provides the properties of the specified optimization job.

" + }, "DescribePipeline":{ "name":"DescribePipeline", "http":{ @@ -2961,6 +3000,16 @@ "output":{"shape":"ListNotebookInstancesOutput"}, "documentation":"

Returns a list of the SageMaker notebook instances in the requester's account in an Amazon Web Services Region.

" }, + "ListOptimizationJobs":{ + "name":"ListOptimizationJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOptimizationJobsRequest"}, + "output":{"shape":"ListOptimizationJobsResponse"}, + "documentation":"

Lists the optimization jobs in your account and their properties.

" + }, "ListPipelineExecutionSteps":{ "name":"ListPipelineExecutionSteps", "http":{ @@ -3491,6 +3540,18 @@ "input":{"shape":"StopNotebookInstanceInput"}, "documentation":"

Terminates the ML compute instance. Before terminating the instance, SageMaker disconnects the ML storage volume from it. SageMaker preserves the ML storage volume. SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance.

To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.

" }, + "StopOptimizationJob":{ + "name":"StopOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopOptimizationJobRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Ends a running inference optimization job.

" + }, "StopPipelineExecution":{ "name":"StopPipelineExecution", "http":{ @@ -4253,6 +4314,33 @@ "max":15, "min":1 }, + "AdditionalModelChannelName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[A-Za-z0-9\\.\\-_]+" + }, + "AdditionalModelDataSource":{ + "type":"structure", + "required":[ + "ChannelName", + "S3DataSource" + ], + "members":{ + "ChannelName":{ + "shape":"AdditionalModelChannelName", + "documentation":"

A custom name for this AdditionalModelDataSource object.

" + }, + "S3DataSource":{"shape":"S3ModelDataSource"} + }, + "documentation":"

Data sources that are available to your model in addition to the one that you specify for ModelDataSource when you use the CreateModel action.

" + }, + "AdditionalModelDataSources":{ + "type":"list", + "member":{"shape":"AdditionalModelDataSource"}, + "max":5, + "min":0 + }, "AdditionalS3DataSource":{ "type":"structure", "required":[ @@ -4529,6 +4617,20 @@ }, "documentation":"

Specifies configurations for one or more training jobs that SageMaker runs to test the algorithm.

" }, + "AmazonQSettings":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"FeatureStatus", + "documentation":"

Whether Amazon Q has been enabled within the domain.

" + }, + "QProfileArn":{ + "shape":"QProfileArn", + "documentation":"

The ARN of the Amazon Q profile used within the domain.

" + } + }, + "documentation":"

A collection of settings that configure the Amazon Q experience within the domain.

" + }, "AnnotationConsolidationConfig":{ "type":"structure", "required":["AnnotationConsolidationLambdaArn"], @@ -7891,6 +7993,10 @@ "shape":"ModelDataSource", "documentation":"

Specifies the location of ML model data to deploy.

Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and SageMaker Marketplace.

" }, + "AdditionalModelDataSources":{ + "shape":"AdditionalModelDataSources", + "documentation":"

Data sources that are available to your model in addition to the one that you specify for ModelDataSource when you use the CreateModel action.

" + }, "Environment":{ "shape":"EnvironmentMap", "documentation":"

The environment variables to set in the Docker container.

The maximum length of each key and value in the Environment map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a CreateModel request, then the maximum length of all of their maps, combined, is also 32 KB.

" @@ -10201,6 +10307,67 @@ } } }, + "CreateOptimizationJobRequest":{ + "type":"structure", + "required":[ + "OptimizationJobName", + "RoleArn", + "ModelSource", + "DeploymentInstanceType", + "OptimizationConfigs", + "OutputConfig", + "StoppingCondition" + ], + "members":{ + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

A custom name for the new optimization job.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.

During model optimization, Amazon SageMaker needs your permission to:

You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles.

" + }, + "ModelSource":{ + "shape":"OptimizationJobModelSource", + "documentation":"

The location of the source model to optimize with an optimization job.

" + }, + "DeploymentInstanceType":{ + "shape":"OptimizationJobDeploymentInstanceType", + "documentation":"

The type of instance that hosts the optimized model that you create with the optimization job.

" + }, + "OptimizationEnvironment":{ + "shape":"OptimizationJobEnvironmentVariables", + "documentation":"

The environment variables to set in the model container.

" + }, + "OptimizationConfigs":{ + "shape":"OptimizationConfigs", + "documentation":"

Settings for each of the optimization techniques that the job applies.

" + }, + "OutputConfig":{ + "shape":"OptimizationJobOutputConfig", + "documentation":"

Details for where to store the optimized model that you create with the optimization job.

" + }, + "StoppingCondition":{"shape":"StoppingCondition"}, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of key-value pairs associated with the optimization job. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

" + }, + "VpcConfig":{ + "shape":"OptimizationVpcConfig", + "documentation":"

A VPC in Amazon VPC that your optimized model has access to.

" + } + } + }, + "CreateOptimizationJobResponse":{ + "type":"structure", + "required":["OptimizationJobArn"], + "members":{ + "OptimizationJobArn":{ + "shape":"OptimizationJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the optimization job.

" + } + } + }, "CreatePipelineRequest":{ "type":"structure", "required":[ @@ -12061,6 +12228,16 @@ } } }, + "DeleteOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

The name that you assigned to the optimization job.

" + } + } + }, "DeletePipelineRequest":{ "type":"structure", "required":[ @@ -15681,6 +15858,99 @@ } } }, + "DescribeOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

The name that you assigned to the optimization job.

" + } + } + }, + "DescribeOptimizationJobResponse":{ + "type":"structure", + "required":[ + "OptimizationJobArn", + "OptimizationJobStatus", + "CreationTime", + "LastModifiedTime", + "OptimizationJobName", + "ModelSource", + "DeploymentInstanceType", + "OptimizationConfigs", + "OutputConfig", + "RoleArn", + "StoppingCondition" + ], + "members":{ + "OptimizationJobArn":{ + "shape":"OptimizationJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the optimization job.

" + }, + "OptimizationJobStatus":{ + "shape":"OptimizationJobStatus", + "documentation":"

The current status of the optimization job.

" + }, + "OptimizationStartTime":{ + "shape":"Timestamp", + "documentation":"

The time when the optimization job started.

" + }, + "OptimizationEndTime":{ + "shape":"Timestamp", + "documentation":"

The time when the optimization job finished processing.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

The time when you created the optimization job.

" + }, + "LastModifiedTime":{ + "shape":"LastModifiedTime", + "documentation":"

The time when the optimization job was last updated.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the optimization job status is FAILED, the reason for the failure.

" + }, + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

The name that you assigned to the optimization job.

" + }, + "ModelSource":{ + "shape":"OptimizationJobModelSource", + "documentation":"

The location of the source model to optimize with an optimization job.

" + }, + "OptimizationEnvironment":{ + "shape":"OptimizationJobEnvironmentVariables", + "documentation":"

The environment variables to set in the model container.

" + }, + "DeploymentInstanceType":{ + "shape":"OptimizationJobDeploymentInstanceType", + "documentation":"

The type of instance that hosts the optimized model that you create with the optimization job.

" + }, + "OptimizationConfigs":{ + "shape":"OptimizationConfigs", + "documentation":"

Settings for each of the optimization techniques that the job applies.

" + }, + "OutputConfig":{ + "shape":"OptimizationJobOutputConfig", + "documentation":"

Details for where to store the optimized model that you create with the optimization job.

" + }, + "OptimizationOutput":{ + "shape":"OptimizationOutput", + "documentation":"

Output values produced by an optimization job.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the IAM role that you assigned to the optimization job.

" + }, + "StoppingCondition":{"shape":"StoppingCondition"}, + "VpcConfig":{ + "shape":"OptimizationVpcConfig", + "documentation":"

A VPC in Amazon VPC that your optimized model has access to.

" + } + } + }, "DescribePipelineDefinitionForExecutionRequest":{ "type":"structure", "required":["PipelineExecutionArn"], @@ -17143,6 +17413,10 @@ "DockerSettings":{ "shape":"DockerSettings", "documentation":"

A collection of settings that configure the domain's Docker interaction.

" + }, + "AmazonQSettings":{ + "shape":"AmazonQSettings", + "documentation":"

A collection of settings that configure the Amazon Q experience within the domain. The AuthMode that you use to create the domain must be SSO.

" } }, "documentation":"

A collection of settings that apply to the SageMaker Domain. These settings are specified through the CreateDomain API call.

" @@ -17165,6 +17439,10 @@ "DockerSettings":{ "shape":"DockerSettings", "documentation":"

A collection of settings that configure the domain's Docker interaction.

" + }, + "AmazonQSettings":{ + "shape":"AmazonQSettings", + "documentation":"

A collection of settings that configure the Amazon Q experience within the domain.

" } }, "documentation":"

A collection of Domain configuration settings to update.

" @@ -25406,6 +25684,78 @@ } } }, + "ListOptimizationJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token that you use to get the next set of results following a truncated response. If the response to the previous request was truncated, that response provides the value for this token.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of optimization jobs to return in the response. The default is 50.

", + "box":true + }, + "CreationTimeAfter":{ + "shape":"CreationTime", + "documentation":"

Filters the results to only those optimization jobs that were created after the specified time.

" + }, + "CreationTimeBefore":{ + "shape":"CreationTime", + "documentation":"

Filters the results to only those optimization jobs that were created before the specified time.

" + }, + "LastModifiedTimeAfter":{ + "shape":"LastModifiedTime", + "documentation":"

Filters the results to only those optimization jobs that were updated after the specified time.

" + }, + "LastModifiedTimeBefore":{ + "shape":"LastModifiedTime", + "documentation":"

Filters the results to only those optimization jobs that were updated before the specified time.

" + }, + "OptimizationContains":{ + "shape":"NameContains", + "documentation":"

Filters the results to only those optimization jobs that apply the specified optimization techniques. You can specify either Quantization or Compilation.

" + }, + "NameContains":{ + "shape":"NameContains", + "documentation":"

Filters the results to only those optimization jobs with a name that contains the specified string.

" + }, + "StatusEquals":{ + "shape":"OptimizationJobStatus", + "documentation":"

Filters the results to only those optimization jobs with the specified status.

" + }, + "SortBy":{ + "shape":"ListOptimizationJobsSortBy", + "documentation":"

The field by which to sort the optimization jobs in the response. The default is CreationTime.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for results. The default is Ascending.

" + } + } + }, + "ListOptimizationJobsResponse":{ + "type":"structure", + "required":["OptimizationJobSummaries"], + "members":{ + "OptimizationJobSummaries":{ + "shape":"OptimizationJobSummaries", + "documentation":"

A list of optimization jobs and their properties that matches any of the filters you specified in the request.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use in a subsequent request to get the next set of results following a truncated response.

" + } + } + }, + "ListOptimizationJobsSortBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime", + "Status" + ] + }, "ListPipelineExecutionStepsRequest":{ "type":"structure", "members":{ @@ -27111,6 +27461,20 @@ }, "documentation":"

Configures the timeout and maximum number of retries for processing a transform job invocation.

" }, + "ModelCompilationConfig":{ + "type":"structure", + "members":{ + "Image":{ + "shape":"OptimizationContainerImage", + "documentation":"

The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization.

" + }, + "OverrideEnvironment":{ + "shape":"OptimizationJobEnvironmentVariables", + "documentation":"

Environment variables that override the default ones in the model container.

" + } + }, + "documentation":"

Settings for the model compilation technique that's applied by a model optimization job.

" + }, "ModelCompressionType":{ "type":"string", "enum":[ @@ -28135,6 +28499,20 @@ }, "documentation":"

The input for the model quality monitoring job. Currently endpoints are supported for input for model quality monitoring jobs.

" }, + "ModelQuantizationConfig":{ + "type":"structure", + "members":{ + "Image":{ + "shape":"OptimizationContainerImage", + "documentation":"

The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization.

" + }, + "OverrideEnvironment":{ + "shape":"OptimizationJobEnvironmentVariables", + "documentation":"

Environment variables that override the default ones in the model container.

" + } + }, + "documentation":"

Settings for the model quantization technique that's applied by a model optimization job.

" + }, "ModelRegisterSettings":{ "type":"structure", "members":{ @@ -29609,6 +29987,244 @@ "In" ] }, + "OptimizationConfig":{ + "type":"structure", + "members":{ + "ModelQuantizationConfig":{ + "shape":"ModelQuantizationConfig", + "documentation":"

Settings for the model quantization technique that's applied by a model optimization job.

" + }, + "ModelCompilationConfig":{ + "shape":"ModelCompilationConfig", + "documentation":"

Settings for the model compilation technique that's applied by a model optimization job.

" + } + }, + "documentation":"

Settings for an optimization technique that you apply with a model optimization job.

", + "union":true + }, + "OptimizationConfigs":{ + "type":"list", + "member":{"shape":"OptimizationConfig"}, + "max":10 + }, + "OptimizationContainerImage":{ + "type":"string", + "max":255, + "pattern":"[\\S]+" + }, + "OptimizationJobArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:optimization-job/.*" + }, + "OptimizationJobDeploymentInstanceType":{ + "type":"string", + "enum":[ + "ml.p4d.24xlarge", + "ml.p4de.24xlarge", + "ml.p5.48xlarge", + "ml.g5.xlarge", + "ml.g5.2xlarge", + "ml.g5.4xlarge", + "ml.g5.8xlarge", + "ml.g5.12xlarge", + "ml.g5.16xlarge", + "ml.g5.24xlarge", + "ml.g5.48xlarge", + "ml.g6.xlarge", + "ml.g6.2xlarge", + "ml.g6.4xlarge", + "ml.g6.8xlarge", + "ml.g6.12xlarge", + "ml.g6.16xlarge", + "ml.g6.24xlarge", + "ml.g6.48xlarge", + "ml.inf2.xlarge", + "ml.inf2.8xlarge", + "ml.inf2.24xlarge", + "ml.inf2.48xlarge", + "ml.trn1.2xlarge", + "ml.trn1.32xlarge", + "ml.trn1n.32xlarge" + ] + }, + "OptimizationJobEnvironmentVariables":{ + "type":"map", + "key":{"shape":"NonEmptyString256"}, + "value":{"shape":"String256"}, + "max":25 + }, + "OptimizationJobModelSource":{ + "type":"structure", + "members":{ + "S3":{ + "shape":"OptimizationJobModelSourceS3", + "documentation":"

The Amazon S3 location of a source model to optimize with an optimization job.

" + } + }, + "documentation":"

The location of the source model to optimize with an optimization job.

" + }, + "OptimizationJobModelSourceS3":{ + "type":"structure", + "members":{ + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

An Amazon S3 URI that locates a source model to optimize with an optimization job.

" + }, + "ModelAccessConfig":{ + "shape":"OptimizationModelAccessConfig", + "documentation":"

The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA).

" + } + }, + "documentation":"

The Amazon S3 location of a source model to optimize with an optimization job.

" + }, + "OptimizationJobOutputConfig":{ + "type":"structure", + "required":["S3OutputLocation"], + "members":{ + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The Amazon Resource Name (ARN) of a key in Amazon Web Services KMS. SageMaker uses the key to encrypt the artifacts of the optimized model when SageMaker uploads the model to Amazon S3.

" + }, + "S3OutputLocation":{ + "shape":"S3Uri", + "documentation":"

The Amazon S3 URI for where to store the optimized model that you create with an optimization job.

" + } + }, + "documentation":"

Details for where to store the optimized model that you create with the optimization job.

" + }, + "OptimizationJobStatus":{ + "type":"string", + "enum":[ + "INPROGRESS", + "COMPLETED", + "FAILED", + "STARTING", + "STOPPING", + "STOPPED" + ] + }, + "OptimizationJobSummaries":{ + "type":"list", + "member":{"shape":"OptimizationJobSummary"} + }, + "OptimizationJobSummary":{ + "type":"structure", + "required":[ + "OptimizationJobName", + "OptimizationJobArn", + "CreationTime", + "OptimizationJobStatus", + "DeploymentInstanceType", + "OptimizationTypes" + ], + "members":{ + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

The name that you assigned to the optimization job.

" + }, + "OptimizationJobArn":{ + "shape":"OptimizationJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the optimization job.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

The time when you created the optimization job.

" + }, + "OptimizationJobStatus":{ + "shape":"OptimizationJobStatus", + "documentation":"

The current status of the optimization job.

" + }, + "OptimizationStartTime":{ + "shape":"Timestamp", + "documentation":"

The time when the optimization job started.

" + }, + "OptimizationEndTime":{ + "shape":"Timestamp", + "documentation":"

The time when the optimization job finished processing.

" + }, + "LastModifiedTime":{ + "shape":"LastModifiedTime", + "documentation":"

The time when the optimization job was last updated.

" + }, + "DeploymentInstanceType":{ + "shape":"OptimizationJobDeploymentInstanceType", + "documentation":"

The type of instance that hosts the optimized model that you create with the optimization job.

" + }, + "OptimizationTypes":{ + "shape":"OptimizationTypes", + "documentation":"

The optimization techniques that are applied by the optimization job.

" + } + }, + "documentation":"

Summarizes an optimization job by providing some of its key properties.

" + }, + "OptimizationModelAcceptEula":{"type":"boolean"}, + "OptimizationModelAccessConfig":{ + "type":"structure", + "required":["AcceptEula"], + "members":{ + "AcceptEula":{ + "shape":"OptimizationModelAcceptEula", + "documentation":"

Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as True in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

" + } + }, + "documentation":"

The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA).

" + }, + "OptimizationOutput":{ + "type":"structure", + "members":{ + "RecommendedInferenceImage":{ + "shape":"OptimizationContainerImage", + "documentation":"

The image that SageMaker recommends that you use to host the optimized model that you created with an optimization job.

" + } + }, + "documentation":"

Output values produced by an optimization job.

" + }, + "OptimizationType":{"type":"string"}, + "OptimizationTypes":{ + "type":"list", + "member":{"shape":"OptimizationType"} + }, + "OptimizationVpcConfig":{ + "type":"structure", + "required":[ + "SecurityGroupIds", + "Subnets" + ], + "members":{ + "SecurityGroupIds":{ + "shape":"OptimizationVpcSecurityGroupIds", + "documentation":"

The VPC security group IDs, in the form sg-xxxxxxxx. Specify the security groups for the VPC that is specified in the Subnets field.

" + }, + "Subnets":{ + "shape":"OptimizationVpcSubnets", + "documentation":"

The ID of the subnets in the VPC to which you want to connect your optimized model.

" + } + }, + "documentation":"

A VPC in Amazon VPC that's accessible to an optimized model that you create with an optimization job. You can control access to and from your resources by configuring a VPC. For more information, see Give SageMaker Access to Resources in your Amazon VPC.

" + }, + "OptimizationVpcSecurityGroupId":{ + "type":"string", + "max":32, + "pattern":"[-0-9a-zA-Z]+" + }, + "OptimizationVpcSecurityGroupIds":{ + "type":"list", + "member":{"shape":"OptimizationVpcSecurityGroupId"}, + "max":5, + "min":1 + }, + "OptimizationVpcSubnetId":{ + "type":"string", + "max":32, + "pattern":"[-0-9a-zA-Z]+" + }, + "OptimizationVpcSubnets":{ + "type":"list", + "member":{"shape":"OptimizationVpcSubnetId"}, + "max":16, + "min":1 + }, "OptionalDouble":{"type":"double"}, "OptionalInteger":{"type":"integer"}, "OptionalVolumeSizeInGB":{ @@ -31733,6 +32349,10 @@ } } }, + "QProfileArn":{ + "type":"string", + "pattern":"^arn:[-.a-z0-9]{1,63}:codewhisperer:([-.a-z0-9]{0,63}:){2}([a-zA-Z0-9-_:/]){1,1023}$" + }, "QualityCheckStepMetadata":{ "type":"structure", "members":{ @@ -34424,6 +35044,16 @@ } } }, + "StopOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

The name that you assigned to the optimization job.

" + } + } + }, "StopPipelineExecutionRequest":{ "type":"structure", "required":[ @@ -34497,7 +35127,7 @@ "documentation":"

The maximum length of time, in seconds, that a training or compilation job can be pending before it is stopped.

" } }, - "documentation":"

Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap model training costs.

To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.

" + "documentation":"

Specifies a limit to how long a job can run. When the job reaches the time limit, SageMaker ends the job. Use this API to cap costs.

To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.

" }, "StorageType":{ "type":"string", From 561839bed755ed58e42353ba7e98acf74fb908bc Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 9 Jul 2024 18:08:13 +0000 Subject: [PATCH 2/3] Update endpoints model --- botocore/data/endpoints.json | 33 ++++++--------------------------- 1 file changed, 6 insertions(+), 27 deletions(-) diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 312484c9c1..56460f6da0 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -28523,31 +28523,17 @@ }, "redshift" : { "endpoints" : { - "fips-us-iso-east-1" : { + "us-iso-east-1" : { "credentialScope" : { "region" : "us-iso-east-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov" + "hostname" : "redshift.us-iso-east-1.c2s.ic.gov" }, - "fips-us-iso-west-1" : { + "us-iso-west-1" : { "credentialScope" : { "region" : "us-iso-west-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov" - }, - "us-iso-east-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - }, - "us-iso-west-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "redshift.us-iso-west-1.c2s.ic.gov" } } }, @@ -29211,18 +29197,11 @@ }, "redshift" : { "endpoints" : { - "fips-us-isob-east-1" : { + "us-isob-east-1" : { "credentialScope" : { "region" : "us-isob-east-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov" - }, - "us-isob-east-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "redshift.us-isob-east-1.sc2s.sgov.gov" } } }, From 231c2b5db4e553b60bccabb2eee851f67f690d40 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 9 Jul 2024 18:09:04 +0000 Subject: [PATCH 3/3] Bumping version to 1.34.142 --- .changes/1.34.142.json | 22 
+++++++++++++++++++ .../api-change-datazone-24284.json | 5 ----- .../next-release/api-change-fsx-63480.json | 5 ----- .../api-change-opensearch-6019.json | 5 ----- .../api-change-sagemaker-55808.json | 5 ----- CHANGELOG.rst | 9 ++++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 8 files changed, 33 insertions(+), 22 deletions(-) create mode 100644 .changes/1.34.142.json delete mode 100644 .changes/next-release/api-change-datazone-24284.json delete mode 100644 .changes/next-release/api-change-fsx-63480.json delete mode 100644 .changes/next-release/api-change-opensearch-6019.json delete mode 100644 .changes/next-release/api-change-sagemaker-55808.json diff --git a/.changes/1.34.142.json b/.changes/1.34.142.json new file mode 100644 index 0000000000..35ab99cbfa --- /dev/null +++ b/.changes/1.34.142.json @@ -0,0 +1,22 @@ +[ + { + "category": "``datazone``", + "description": "This release deprecates dataProductItem field from SearchInventoryResultItem, along with some unused DataProduct shapes", + "type": "api-change" + }, + { + "category": "``fsx``", + "description": "Adds support for FSx for NetApp ONTAP 2nd Generation file systems, and FSx for OpenZFS Single AZ HA file systems.", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "This release adds support for enabling or disabling Natural Language Query Processing feature for Amazon OpenSearch Service domains, and provides visibility into the current state of the setup or tear-down.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "This release 1/ enables optimization jobs that allows customers to perform Ahead-of-time compilation and quantization. 2/ allows customers to control access to Amazon Q integration in SageMaker Studio. 
3/ enables AdditionalModelDataSources for CreateModel action.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-datazone-24284.json b/.changes/next-release/api-change-datazone-24284.json deleted file mode 100644 index cb63ed8eee..0000000000 --- a/.changes/next-release/api-change-datazone-24284.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``datazone``", - "description": "This release deprecates dataProductItem field from SearchInventoryResultItem, along with some unused DataProduct shapes" -} diff --git a/.changes/next-release/api-change-fsx-63480.json b/.changes/next-release/api-change-fsx-63480.json deleted file mode 100644 index 8edc84d0e4..0000000000 --- a/.changes/next-release/api-change-fsx-63480.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``fsx``", - "description": "Adds support for FSx for NetApp ONTAP 2nd Generation file systems, and FSx for OpenZFS Single AZ HA file systems." -} diff --git a/.changes/next-release/api-change-opensearch-6019.json b/.changes/next-release/api-change-opensearch-6019.json deleted file mode 100644 index 861c639b06..0000000000 --- a/.changes/next-release/api-change-opensearch-6019.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``opensearch``", - "description": "This release adds support for enabling or disabling Natural Language Query Processing feature for Amazon OpenSearch Service domains, and provides visibility into the current state of the setup or tear-down." 
-} diff --git a/.changes/next-release/api-change-sagemaker-55808.json b/.changes/next-release/api-change-sagemaker-55808.json deleted file mode 100644 index 2fa5b19186..0000000000 --- a/.changes/next-release/api-change-sagemaker-55808.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``sagemaker``", - "description": "This release 1/ enables optimization jobs that allows customers to perform Ahead-of-time compilation and quantization. 2/ allows customers to control access to Amazon Q integration in SageMaker Studio. 3/ enables AdditionalModelDataSources for CreateModel action." -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 671339d61b..c79cb8ad35 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,15 @@ CHANGELOG ========= +1.34.142 +======== + +* api-change:``datazone``: This release deprecates dataProductItem field from SearchInventoryResultItem, along with some unused DataProduct shapes +* api-change:``fsx``: Adds support for FSx for NetApp ONTAP 2nd Generation file systems, and FSx for OpenZFS Single AZ HA file systems. +* api-change:``opensearch``: This release adds support for enabling or disabling Natural Language Query Processing feature for Amazon OpenSearch Service domains, and provides visibility into the current state of the setup or tear-down. +* api-change:``sagemaker``: This release 1/ enables optimization jobs that allows customers to perform Ahead-of-time compilation and quantization. 2/ allows customers to control access to Amazon Q integration in SageMaker Studio. 3/ enables AdditionalModelDataSources for CreateModel action. 
+ + 1.34.141 ======== diff --git a/botocore/__init__.py b/botocore/__init__.py index da3cbb04d3..34112251e0 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.34.141' +__version__ = '1.34.142' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index b7703aaaec..27543b5eee 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.34.1' # The full version, including alpha/beta/rc tags. -release = '1.34.141' +release = '1.34.142' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.